Add NNAPI specification and infrastructure for FL7.
Add operations MIRROR_PAD and REVERSE.
Extend RSQRT to support QUANT8_ASYMM and QUANT8_ASYMM_SIGNED.
DOES NOT include tests or a CPU reference implementation.
Bug: 202280917
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksTargetTest
Merged-In: I0b2133346b996849faac00c46885e3633c78f024
Change-Id: I0b2133346b996849faac00c46885e3633c78f024
(cherry picked from commit 04ed8595b5587e9f6e71ac45edd17cf92da56c32)
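As an illustration of the feature-level gating this FL7 infrastructure implies (a hypothetical sketch, not code added by this patch; the enum and helper names below are invented for the example), a runtime or validation layer would only route the two new operations to devices reporting feature level 7 or higher:

    #include <cstdint>

    // Local stand-in mirroring the relevant OperationType values from the AIDL enum below.
    enum class OperationType : int32_t {
        RSQRT = 83,
        PACK = 103,
        MIRROR_PAD = 104,  // new in NNAPI feature level 7
        REVERSE = 105,     // new in NNAPI feature level 7
    };

    // True if the operation was introduced at feature level 7.
    bool operationRequiresFeatureLevel7(OperationType op) {
        return op == OperationType::MIRROR_PAD || op == OperationType::REVERSE;
    }

    // Devices reporting feature level 6 or lower must not be asked to run MIRROR_PAD or REVERSE.
    bool isOperationSupported(OperationType op, int64_t deviceFeatureLevel) {
        if (operationRequiresFeatureLevel7(op)) return deviceFeatureLevel >= 7;
        return true;  // other operations: per-operation validation omitted from this sketch
    }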
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index ece4de7..94502d1 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -405,7 +405,7 @@
</hal>
<hal format="aidl" optional="true">
<name>android.hardware.neuralnetworks</name>
- <version>1-2</version>
+ <version>1-3</version>
<interface>
<name>IDevice</name>
<regex-instance>.*</regex-instance>
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
index 2eff11b..34506c8 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
@@ -138,4 +138,6 @@
RANK = 101,
BATCH_MATMUL = 102,
PACK = 103,
+ MIRROR_PAD = 104,
+ REVERSE = 105,
}
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
index 2ec91ac..aebe8d9 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
@@ -4318,6 +4318,8 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since NNAPI feature level 7)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 7)
*
* Supported tensor rank: from 1.
*
@@ -4326,6 +4328,9 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
RSQRT = 83,
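For the quantized RSQRT support documented above, the intended per-element behavior follows the usual NNAPI affine quantization, real = scale * (quantized - zeroPoint). A minimal sketch for TENSOR_QUANT8_ASYMM (illustrative only; this change deliberately omits the CPU reference implementation, and the helper name and clamping below are assumptions):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Dequantize, take 1/sqrt, then requantize with the output's own scale/zeroPoint,
    // which the specification allows to differ from the input's.
    uint8_t rsqrtQuant8Asymm(uint8_t in, float inScale, int32_t inZeroPoint,
                             float outScale, int32_t outZeroPoint) {
        const float real = inScale * (static_cast<int32_t>(in) - inZeroPoint);
        const float result = 1.0f / std::sqrt(real);  // undefined for real <= 0, as with float RSQRT
        const int32_t q = static_cast<int32_t>(std::round(result / outScale)) + outZeroPoint;
        return static_cast<uint8_t>(std::clamp(q, 0, 255));  // QUANT8_ASYMM_SIGNED would clamp to [-128, 127]
    }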
@@ -5322,4 +5327,68 @@
* * 0: The packed tensor.
*/
PACK = 103,
+
+ /**
+ * Pads a tensor with mirrored values.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+ * * {@link OperandType::TENSOR_INT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the mode.
+ * Options are 0:REFLECT and 1:SYMMETRIC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ MIRROR_PAD = 104,
+
+ /**
+ * Reverses a specified dimension of a tensor.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+ * * {@link OperandType::TENSOR_INT32}
+ *
+ * Supported tensor rank: up to 8.
+ *
+ * Inputs:
+ * * 0: Input tensor of rank n.
+ * * 1: Axis tensor of type {@link OperandType::TENSOR_INT32} and shape [1],
+ * specifying which dimension of the input tensor is to be reversed. The dimension
+ * must be in the range [0, n).
+ *
+ * Outputs:
+ * * 0: The reversed tensor.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensors,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REVERSE = 105,
}
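The MIRROR_PAD description above distinguishes REFLECT (the edge element is not repeated) from SYMMETRIC (the edge element is repeated). A standalone 1-D sketch of that behavior, illustrative only and not the CPU reference implementation this change omits:

    #include <vector>

    // MIRROR_PAD on a 1-D tensor. mode 0 = REFLECT, mode 1 = SYMMETRIC.
    // Assumes the padding amounts are valid for the chosen mode.
    std::vector<float> mirrorPad1D(const std::vector<float>& input, int padFront, int padBack,
                                   int mode) {
        const int n = static_cast<int>(input.size());
        std::vector<float> output;
        output.reserve(padFront + n + padBack);
        for (int i = -padFront; i < n + padBack; ++i) {
            int src = i;
            if (src < 0) {
                src = -src - (mode == 1 ? 1 : 0);         // mirror across the front edge
            } else if (src >= n) {
                src = 2 * n - src - (mode == 1 ? 1 : 2);  // mirror across the back edge
            }
            output.push_back(input[src]);
        }
        return output;
    }

For input {1, 2, 3} with paddings {{2, 2}}, REFLECT yields {3, 2, 1, 2, 3, 2, 1} and SYMMETRIC yields {2, 1, 1, 2, 3, 3, 2}; either way output0.dimension[0] = 2 + 3 + 2 = 7.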
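REVERSE flips the input along the single axis named by input 1. A 1-D sketch (illustrative only; for rank > 1 only the selected dimension is reversed):

    #include <algorithm>
    #include <vector>

    // REVERSE along the only dimension of a 1-D tensor; e.g. {1, 2, 3} becomes {3, 2, 1}.
    std::vector<float> reverse1D(std::vector<float> input) {
        std::reverse(input.begin(), input.end());
        return input;
    }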
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index bf8fc2d..37ad6d6 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -56,13 +56,21 @@
}
cc_library_static {
- name: "neuralnetworks_utils_hal_aidl",
+ name: "neuralnetworks_utils_hal_aidl_v2",
defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
shared_libs: [
"android.hardware.neuralnetworks-V2-ndk",
],
}
+cc_library_static {
+ name: "neuralnetworks_utils_hal_aidl",
+ defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
+ shared_libs: [
+ "android.hardware.neuralnetworks-V3-ndk",
+ ],
+}
+
// A cc_defaults that includes the latest non-experimental AIDL utilities and other AIDL libraries
// that are commonly used together. Modules that always depend on the latest non-experimental
// AIDL features can include this cc_defaults to avoid managing dependency versions explicitly.
@@ -71,7 +79,7 @@
static_libs: [
"android.hardware.common-V2-ndk",
"android.hardware.graphics.common-V2-ndk",
- "android.hardware.neuralnetworks-V2-ndk",
+ "android.hardware.neuralnetworks-V3-ndk",
"neuralnetworks_utils_hal_aidl",
],
}
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index b4e747e..a27487e 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -36,6 +36,8 @@
return nn::kVersionFeatureLevel5;
case 2:
return nn::kVersionFeatureLevel6;
+ case 3:
+ return nn::kVersionFeatureLevel7;
default:
return std::nullopt;
}
diff --git a/neuralnetworks/aidl/utils/test/DeviceTest.cpp b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
index 4e9fc46..0366e7d 100644
--- a/neuralnetworks/aidl/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
@@ -159,6 +159,8 @@
return "v1";
case nn::Version::Level::FEATURE_LEVEL_6:
return "v2";
+ case nn::Version::Level::FEATURE_LEVEL_7:
+ return "v3";
default:
LOG(FATAL) << "Invalid AIDL version: " << version;
return "invalid";
@@ -893,7 +895,8 @@
}
INSTANTIATE_TEST_SUITE_P(TestDevice, DeviceTest,
- ::testing::Values(nn::kVersionFeatureLevel5, nn::kVersionFeatureLevel6),
+ ::testing::Values(nn::kVersionFeatureLevel5, nn::kVersionFeatureLevel6,
+ nn::kVersionFeatureLevel7),
printDeviceTest);
} // namespace aidl::android::hardware::neuralnetworks::utils