Fix stale NNAPI documentation
Since HAL version 1.2, the scale and zeroPoint fields are applicable to
operand types other than TENSOR_QUANT8_ASYMM and TENSOR_INT32.
Also makes some whitespace changes because the generated documentation
had gotten out of sync with the template in frameworks/ml/nn.
Fix: 160406237
Test: generate_api.sh
Test: m
Change-Id: Icf594d40c73ff8c05044c320ac9eb6a9c5a89754
Merged-In: Icf594d40c73ff8c05044c320ac9eb6a9c5a89754
(cherry picked from commit 0d6cefe90b949c9e2e90471f7aa959a0714eeb45)
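For context, a minimal sketch of the rule the reworded comments express.
This uses hypothetical stand-in types rather than the real HAL headers, and
the exact set of quantized types and validity checks is assumed for
illustration only: scale and zeroPoint carry meaning only for types that
define them, and must be 0 otherwise.

// Simplified stand-ins for the NNAPI OperandType enum and the Operand
// fields touched by this change; not the generated HAL definitions.
#include <cstdint>

enum class OperandType {
    TENSOR_FLOAT32,
    TENSOR_INT32,
    TENSOR_QUANT8_ASYMM,
    TENSOR_QUANT8_SYMM,
    TENSOR_QUANT16_ASYMM,
    TENSOR_QUANT16_SYMM,
};

struct Operand {
    OperandType type;
    float scale;
    int32_t zeroPoint;
};

// Hypothetical validity check mirroring the updated documentation:
// "Must be 0 when not applicable to an operand type."
bool quantizationParamsValid(const Operand& operand) {
    switch (operand.type) {
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            // Quantized tensor types use scale (and, for the asymmetric
            // types, zeroPoint as well).
            return operand.scale > 0.0f;
        case OperandType::TENSOR_INT32:
            // TENSOR_INT32 may carry a scale (e.g. as the bias of a
            // quantized operation); zeroPoint is not applicable.
            return operand.zeroPoint == 0;
        default:
            // Non-quantized types: both fields must be 0.
            return operand.scale == 0.0f && operand.zeroPoint == 0;
    }
}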
diff --git a/current.txt b/current.txt
index bdbad8a..82eca81 100644
--- a/current.txt
+++ b/current.txt
@@ -652,6 +652,8 @@
# ABI preserving changes to HALs during Android S
cd84ab19c590e0e73dd2307b591a3093ee18147ef95e6d5418644463a6620076 android.hardware.neuralnetworks@1.2::IDevice
+9625e85f56515ad2cf87b6a1847906db669f746ea4ab02cd3d4ca25abc9b0109 android.hardware.neuralnetworks@1.2::types
+745295adfd826de650eedaf8cc6979f52a1cf30b04ea7a089a132d0089475e95 android.hardware.neuralnetworks@1.3::types
38d65fb20c60a5b823298560fc0825457ecdc49603a4b4e94bf81511790737da android.hardware.radio@1.4::types
954c334efd80e8869b66d1ce5fe2755712d96ba4b3c38d415739c330af5fb4cb android.hardware.radio@1.5::types
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 92cf2aa..7441a54 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -4853,15 +4853,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;
diff --git a/neuralnetworks/1.2/types.t b/neuralnetworks/1.2/types.t
index d197f6b..21d88ac 100644
--- a/neuralnetworks/1.2/types.t
+++ b/neuralnetworks/1.2/types.t
@@ -251,15 +251,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 3b2b14c..7ec6064 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -5103,8 +5103,8 @@
* signature of this operation. That is, if the operation has (3 + n) inputs
* and m outputs, both subgraphs must have n inputs and m outputs with the same
* types, ranks, dimensions, scales,
- * zeroPoints, and extraParams as the corresponding operation inputs and
- * outputs.
+ * zeroPoints, and extraParams as the corresponding operation
+ * inputs and outputs.
* All of the operands mentioned must have fully specified dimensions.
*
* Inputs:
@@ -5170,15 +5170,15 @@
* * 0: A {@link OperandType::SUBGRAPH} reference to the condition
* subgraph. The subgraph must have (m + k + n) inputs with
* the same types, ranks, dimensions,
- * scales, zeroPoints, and extraParams as the corresponding inputs of
- * the WHILE operation and exactly one output of
- * {@link OperandType::TENSOR_BOOL8} and shape [1].
+ * scales, zeroPoints, and extraParams as the
+ * corresponding inputs of the WHILE operation and exactly one output
+ * of {@link OperandType::TENSOR_BOOL8} and shape [1].
* All of the operands mentioned must have fully specified dimensions.
* * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
* The subgraph must have (m + k + n) inputs and (m + k) outputs with
* the same types, ranks, dimensions,
- * scales, zeroPoints, and extraParams as the corresponding inputs and
- * outputs of the WHILE operation.
+ * scales, zeroPoints, and extraParams as the
+ * corresponding inputs and outputs of the WHILE operation.
* All of the operands mentioned must have fully specified dimensions.
* * (m inputs): Initial values for input-output operands.
* * (k inputs): Initial values for state-only operands.
@@ -5538,15 +5538,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index 7220e37..9cffc7a 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -303,15 +303,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;