NNAPI: Clarify behaviour of ops that reduce dimensions
Clarify the behaviour of the following ops when their inputs would
otherwise cause an output to have an empty shape (a shape-inference
sketch of the clarified rule is included below):
* MEAN
* ARGMIN/ARGMAX
* STRIDED_SLICE
* SQUEEZE
Bug: 155508675
Bug: 155660285
Bug: 155662254
Bug: 155238914
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I92d1c3866a462a99b4eed7782b72f62bb6a076c6
Merged-In: I92d1c3866a462a99b4eed7782b72f62bb6a076c6
(cherry picked from commit d4bba83f6a441b49e07d0656b3febd9c706cd28a)
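
For reference, a minimal shape-inference sketch of the clarified MEAN rule.
This is purely illustrative: the helper name and the non-negative-axis
simplification are assumptions, not NNAPI or driver code. When every
dimension is reduced and keep_dims is false, the output shape becomes [1]
rather than an empty shape.

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  // Hypothetical helper, not NNAPI code: MEAN output shape under the
  // clarified rule. Axes are assumed to be non-negative and in range.
  std::vector<uint32_t> meanOutputShape(const std::vector<uint32_t>& input,
                                        const std::vector<int32_t>& axes,
                                        bool keepDims) {
      std::vector<uint32_t> out;
      for (int32_t d = 0; d < static_cast<int32_t>(input.size()); ++d) {
          const bool reduced =
                  std::find(axes.begin(), axes.end(), d) != axes.end();
          if (!reduced) {
              out.push_back(input[d]);  // dimension survives unchanged
          } else if (keepDims) {
              out.push_back(1);         // reduced but kept as size 1
          }
      }
      if (out.empty()) {
          out.push_back(1);             // clarified: never an empty shape
      }
      return out;
  }

  // Example: meanOutputShape({2, 3}, {0, 1}, false) returns {1}, not {}.
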
diff --git a/current.txt b/current.txt
index 803c39a..f03cf70 100644
--- a/current.txt
+++ b/current.txt
@@ -587,10 +587,11 @@
eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
92e101b30e47bdf526a01c52cecfbe730def5997b8260ab497eb949eb2a6dcdf android.hardware.neuralnetworks@1.0::types
5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
+c2711d8748ccbcc858d5d5ec1abf145d9ab4c0b27db8ca215d7c39665a9b6652 android.hardware.neuralnetworks@1.1::types # b/155508675, b/155662254, b/155238914
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
ee1a0dee5be00a6fe2d4d3270068c78016dcb194d768fe07ed894ea20904037f android.hardware.neuralnetworks@1.2::types
-882b1c042ff842d7c52a794fab60bf6c599ef6b100ce99fa1772615096811d05 android.hardware.neuralnetworks@1.2::types # b/155508675
+9c53b727cfa9efde38ebe3914e1e95939cff29c072a1b8c8f419d24853b98831 android.hardware.neuralnetworks@1.2::types # b/155508675, b/155662254, b/155238914, b/155660285
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -642,7 +643,7 @@
ee9dc34b9925b8367b1111c72bd6d9d375432735e451572ca5a665d8516a7744 android.hardware.neuralnetworks@1.3::IPreparedModel
eee3430cc86c97c7b407495863d8fb61da6f1a64b7721e77b9b4909b11b174e9 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
acf84925f8ee0a651f2ec547ac334034de266479b93af5434f6c1f25e66aba96 android.hardware.neuralnetworks@1.3::types
-07801d19ca8a4f20543dae6b4d0c4d8b87e5161d3c431e973a1839cb7915a666 android.hardware.neuralnetworks@1.3::types # b/155508675
+e9080d04218e98512b63aace9ff3da52f0130238391f15cbbf7df396a3ec9072 android.hardware.neuralnetworks@1.3::types # b/155508675, b/155662254, b/155238914, b/155660285
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index da7ba78..c8cdd59 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -126,6 +126,8 @@
* * 0: A tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If all dimensions are reduced and keep_dims is false, the output
+ * shape is [1].
*/
MEAN = 31,
@@ -232,6 +234,8 @@
* removed.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If all input dimensions are equal to 1 and are to be squeezed, the
+ * output shape is [1].
*/
SQUEEZE = 34,
@@ -278,6 +282,8 @@
* where k is the number of bits set in shrink_axis_mask.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If shrink_axis_mask is true for all input dimensions, the output
+ * shape is [1].
*/
STRIDED_SLICE = 35,
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 9eff7ff..92cf2aa 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -1955,6 +1955,8 @@
* * 0: A tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If all dimensions are reduced and keep_dims is false, the output
+ * shape is [1].
*/
MEAN = @1.1::OperationType:MEAN,
@@ -2078,6 +2080,8 @@
* removed.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If all input dimensions are equal to 1 and are to be squeezed, the
+ * output shape is [1].
*/
SQUEEZE = @1.1::OperationType:SQUEEZE,
@@ -2125,6 +2129,8 @@
* where k is the number of bits set in shrink_axis_mask.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If shrink_axis_mask is true for all input dimensions, the output
+ * shape is [1].
*/
STRIDED_SLICE = @1.1::OperationType:STRIDED_SLICE,
@@ -2239,6 +2245,7 @@
*
* Outputs:
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ * If input is 1-dimensional, the output shape is [1].
*/
// There is no underscore in ARG_MAX to avoid name conflict with
// the macro defined in libc/kernel/uapi/linux/limits.h.
@@ -2263,6 +2270,7 @@
*
* Outputs:
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ * If input is 1-dimensional, the output shape is [1].
*/
ARGMIN = 40, // See ARGMAX for naming discussion.
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 83cf442..39ea4c2 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -2012,6 +2012,8 @@
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If all dimensions are reduced and keep_dims is false, the output
+ * shape is [1].
*/
MEAN = @1.2::OperationType:MEAN,
@@ -2141,6 +2143,8 @@
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If all input dimensions are equal to 1 and are to be squeezed, the
+ * output shape is [1].
*/
SQUEEZE = @1.2::OperationType:SQUEEZE,
@@ -2190,6 +2194,8 @@
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
* {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
+ * If shrink_axis_mask is true for all input dimensions, the output
+ * shape is [1].
*/
STRIDED_SLICE = @1.2::OperationType:STRIDED_SLICE,
@@ -2313,6 +2319,7 @@
*
* Outputs:
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ * If input is 1-dimensional, the output shape is [1].
*/
// There is no underscore in ARG_MAX to avoid name conflict with
// the macro defined in libc/kernel/uapi/linux/limits.h.
@@ -2338,6 +2345,7 @@
*
* Outputs:
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ * If input is 1-dimensional, the output shape is [1].
*/
ARGMIN = @1.2::OperationType:ARGMIN, // See ARGMAX for naming discussion.
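
For completeness, an analogous illustrative sketch for the ARGMAX/ARGMIN
and fully-shrunk STRIDED_SLICE cases covered above. Again, the helper
names are hypothetical and this is not NNAPI or driver code; it only
demonstrates the clarified rule that removing every dimension yields an
output shape of [1] instead of an empty shape.

  #include <cstdint>
  #include <vector>

  // Hypothetical helper, not NNAPI code: ARGMAX/ARGMIN output shape under
  // the clarified rule. The axis is assumed to be non-negative.
  std::vector<uint32_t> argMinMaxOutputShape(const std::vector<uint32_t>& input,
                                             int32_t axis) {
      std::vector<uint32_t> out;
      for (int32_t d = 0; d < static_cast<int32_t>(input.size()); ++d) {
          if (d != axis) out.push_back(input[d]);  // reduced axis is removed
      }
      if (out.empty()) out.push_back(1);  // 1-D input -> output shape [1]
      return out;
  }

  // Hypothetical helper, not NNAPI code: STRIDED_SLICE output shape given
  // the already-sliced shape and shrink_axis_mask. Each set bit removes the
  // corresponding dimension from the output.
  std::vector<uint32_t> stridedSliceOutputShape(
          const std::vector<uint32_t>& sliced, uint32_t shrinkAxisMask) {
      std::vector<uint32_t> out;
      for (uint32_t d = 0; d < sliced.size(); ++d) {
          if (!(shrinkAxisMask & (1u << d))) out.push_back(sliced[d]);
      }
      if (out.empty()) out.push_back(1);  // all axes shrunk -> [1]
      return out;
  }

  // Examples: argMinMaxOutputShape({5}, 0) returns {1};
  //           stridedSliceOutputShape({1, 1, 1}, 0x7) returns {1}.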