Add ELU and HARD_SWISH
Bug: 147482068
Bug: 147481241
Test: NNTest_static and VtsHalNeuralnetworksV1_3TargetTest
Change-Id: Iab8da2a666ad9775dfb53d9297e94962fb651353
Merged-In: Iab8da2a666ad9775dfb53d9297e94962fb651353
(cherry picked from commit aee67f83f9e97e060ff31e16e6898a15f4680d04)
diff --git a/current.txt b/current.txt
index f0e7bb4..d615eaa 100644
--- a/current.txt
+++ b/current.txt
@@ -627,7 +627,7 @@
4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
7d23020248194abbee8091cc624f39a5a6d7ccba338b172d5d2d3df0cceffbee android.hardware.neuralnetworks@1.3::IPreparedModel
0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-162515505235bc770601f02c3537f9ccf11582583bf7b11dd2ec81fab6855333 android.hardware.neuralnetworks@1.3::types
+26c643aedf4e28b8d82e517d9cd70601b37f881e1ea94f09808d9e233517e400 android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 0f51b1a..6a852d1 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -4987,6 +4987,56 @@
WHILE = 97,

/**
+ * Computes exponential linear activation on the input tensor element-wise.
+ *
+ * The output is calculated using the following formula:
+ *
+ * ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input. May be zero-sized.
+ * * 1: A scalar, specifying the alpha parameter.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16},
+ * the alpha value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32},
+ * the alpha value must be of {@link OperandType::FLOAT32}.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape and type as input0.
+ */
+ ELU = 98,
+
+ /**
+ * Computes hard-swish activation on the input tensor element-wise.
+ *
+ * Hard swish activation was introduced in
+ * https://arxiv.org/pdf/1905.02244.pdf
+ *
+ * The output is calculated using the following formula:
+ *
+ * h-swish(x) = x * max(0, min(6, (x + 3))) / 6
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input. May be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape and type as input0.
+ * Scale and zero point of this tensor may be different from the input
+ * tensor's parameters.
+ */
+ HARD_SWISH = 99,
+
+ /**
* DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
* OEM operation and data types.
*
@@ -5008,7 +5058,7 @@
enum OperationTypeRange : uint32_t {
BASE_MIN = 0,
FUNDAMENTAL_MIN = 0,
- FUNDAMENTAL_MAX = 97,
+ FUNDAMENTAL_MAX = 99,
OEM_MIN = 10000,
OEM_MAX = 10000,
BASE_MAX = 0xFFFF,
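
Reviewer note (not part of the change): a minimal C++ sketch of the ELU formula documented above, assuming a flattened float32 buffer. The helper names referenceElu and referenceEluTensor are illustrative only; they are not part of the HAL or the NNAPI runtime.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1)), applied element-wise.
// Hypothetical helper, shown only to illustrate the documented formula.
static float referenceElu(float x, float alpha) {
    return std::max(0.0f, x) + std::min(0.0f, alpha * (std::exp(x) - 1.0f));
}

// Applies ELU over a flattened tensor. A zero-sized input yields a
// zero-sized output, matching the "May be zero-sized" note above.
static std::vector<float> referenceEluTensor(const std::vector<float>& input,
                                             float alpha) {
    std::vector<float> output(input.size());
    for (size_t i = 0; i < input.size(); ++i) {
        output[i] = referenceElu(input[i], alpha);
    }
    return output;
}

For a TENSOR_FLOAT16 input the alpha scalar is FLOAT16 as documented; this sketch covers only the FLOAT32 path.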
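
Likewise, a hedged float-only sketch of the h-swish formula. A real TENSOR_QUANT8_ASYMM or TENSOR_QUANT8_ASYMM_SIGNED kernel would dequantize with the input's scale and zero point and requantize with the output's parameters, which the documentation above allows to differ from the input's. The referenceHardSwish names are hypothetical.

#include <algorithm>
#include <cstddef>
#include <vector>

// h-swish(x) = x * max(0, min(6, x + 3)) / 6, applied element-wise.
static float referenceHardSwish(float x) {
    return x * std::max(0.0f, std::min(6.0f, x + 3.0f)) / 6.0f;
}

// Element-wise application over a flattened float tensor buffer.
static std::vector<float> referenceHardSwishTensor(
        const std::vector<float>& input) {
    std::vector<float> output(input.size());
    for (size_t i = 0; i < input.size(); ++i) {
        output[i] = referenceHardSwish(input[i]);
    }
    return output;
}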