Merge "Fix stale NNAPI documentation"
diff --git a/current.txt b/current.txt
index fc8c025..2756c5f 100644
--- a/current.txt
+++ b/current.txt
@@ -768,6 +768,8 @@
# ABI preserving changes to HALs during Android S
cd84ab19c590e0e73dd2307b591a3093ee18147ef95e6d5418644463a6620076 android.hardware.neuralnetworks@1.2::IDevice
+9625e85f56515ad2cf87b6a1847906db669f746ea4ab02cd3d4ca25abc9b0109 android.hardware.neuralnetworks@1.2::types
+745295adfd826de650eedaf8cc6979f52a1cf30b04ea7a089a132d0089475e95 android.hardware.neuralnetworks@1.3::types
# HALs released in Android S
# NOTE: waiting to freeze HALs until later in the release
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 92cf2aa..7441a54 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -4853,15 +4853,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;
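
For context, these hunks replace the stale per-type lists ("Only applicable if the operand is of type TENSOR_QUANT8_ASYMM ...") with the blanket rule that scale and zeroPoint must be 0 whenever they do not apply, deferring the per-type details to OperandType. As a rough illustration of how a consumer of these fields uses them, here is a hedged C++ sketch; the Operand and OperandType names below are simplified stand-ins for the generated HAL types, not the real headers, and only the documented TENSOR_QUANT8_ASYMM formula real = (quantized - zeroPoint) * scale is exercised.

    // Minimal sketch (simplified stand-ins, not the generated HAL types).
    #include <cassert>
    #include <cstdint>

    namespace sketch {

    enum class OperandType { TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

    struct Operand {
        OperandType type;
        float scale;        // Must be 0 when not applicable to the operand type.
        int32_t zeroPoint;  // Must be 0 when not applicable to the operand type.
    };

    // Asymmetric dequantization as documented for TENSOR_QUANT8_ASYMM:
    // real_value = (quantized_value - zeroPoint) * scale.
    inline float dequantize(const Operand& operand, uint8_t quantized) {
        assert(operand.type == OperandType::TENSOR_QUANT8_ASYMM);
        return (static_cast<int32_t>(quantized) - operand.zeroPoint) * operand.scale;
    }

    // The rule the updated comments state: a float operand carries no
    // quantization parameters, so both fields stay at 0.
    inline Operand makeFloat32Operand() {
        return Operand{OperandType::TENSOR_FLOAT32, /*scale=*/0.0f, /*zeroPoint=*/0};
    }

    }  // namespace sketch
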
diff --git a/neuralnetworks/1.2/types.t b/neuralnetworks/1.2/types.t
index d197f6b..21d88ac 100644
--- a/neuralnetworks/1.2/types.t
+++ b/neuralnetworks/1.2/types.t
@@ -251,15 +251,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 3b2b14c..7ec6064 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -5103,8 +5103,8 @@
* signature of this operation. That is, if the operation has (3 + n) inputs
* and m outputs, both subgraphs must have n inputs and m outputs with the same
* types, ranks, dimensions, scales,
- * zeroPoints, and extraParams as the corresponding operation inputs and
- * outputs.
+ * zeroPoints, and extraParams as the corresponding operation
+ * inputs and outputs.
* All of the operands mentioned must have fully specified dimensions.
*
* Inputs:
@@ -5170,15 +5170,15 @@
* * 0: A {@link OperandType::SUBGRAPH} reference to the condition
* subgraph. The subgraph must have (m + k + n) inputs with
* the same types, ranks, dimensions,
- * scales, zeroPoints, and extraParams as the corresponding inputs of
- * the WHILE operation and exactly one output of
- * {@link OperandType::TENSOR_BOOL8} and shape [1].
+ * scales, zeroPoints, and extraParams as the
+ * corresponding inputs of the WHILE operation and exactly one output
+ * of {@link OperandType::TENSOR_BOOL8} and shape [1].
* All of the operands mentioned must have fully specified dimensions.
* * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
* The subgraph must have (m + k + n) inputs and (m + k) outputs with
* the same types, ranks, dimensions,
- * scales, zeroPoints, and extraParams as the corresponding inputs and
- * outputs of the WHILE operation.
+ * scales, zeroPoints, and extraParams as the
+ * corresponding inputs and outputs of the WHILE operation.
* All of the operands mentioned must have fully specified dimensions.
* * (m inputs): Initial values for input-output operands.
* * (k inputs): Initial values for state-only operands.
@@ -5538,15 +5538,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;
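
The reflowed WHILE documentation above also restates the signature rule between the operation and its condition/body subgraphs: the condition subgraph takes the same (m + k + n) operands as the WHILE operation and yields exactly one TENSOR_BOOL8 output of shape [1], while the body takes the same (m + k + n) inputs and returns (m + k) outputs matching the input-output and state-only operands. A rough C++ sketch of that check follows; the Operand and Subgraph types are simplified stand-ins (extraParams and rank bookkeeping omitted), not the generated HAL structs.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    namespace sketch {

    enum class OperandType { TENSOR_FLOAT32, TENSOR_BOOL8, TENSOR_QUANT8_ASYMM, SUBGRAPH };

    struct Operand {
        OperandType type;
        std::vector<uint32_t> dimensions;  // Fully specified dimensions assumed.
        float scale = 0.0f;
        int32_t zeroPoint = 0;
    };

    struct Subgraph {
        std::vector<Operand> inputs;
        std::vector<Operand> outputs;
    };

    // Two operands "match" when type, dimensions (hence rank), scale and
    // zeroPoint agree; extraParams is omitted in this sketch.
    inline bool matches(const Operand& a, const Operand& b) {
        return a.type == b.type && a.dimensions == b.dimensions &&
               a.scale == b.scale && a.zeroPoint == b.zeroPoint;
    }

    inline bool allMatch(const std::vector<Operand>& a, const std::vector<Operand>& b) {
        if (a.size() != b.size()) return false;
        for (std::size_t i = 0; i < a.size(); ++i) {
            if (!matches(a[i], b[i])) return false;
        }
        return true;
    }

    // dataOperands: the (m + k + n) operands that follow the two SUBGRAPH
    // references in the WHILE operation's input list; the first (m + k) of
    // them are the input-output and state-only operands the body reproduces.
    inline bool validWhileSignature(const Subgraph& cond, const Subgraph& body,
                                    const std::vector<Operand>& dataOperands) {
        const bool condOk = allMatch(cond.inputs, dataOperands) &&
                            cond.outputs.size() == 1 &&
                            cond.outputs[0].type == OperandType::TENSOR_BOOL8 &&
                            cond.outputs[0].dimensions == std::vector<uint32_t>{1};
        const bool bodyOk =
            allMatch(body.inputs, dataOperands) &&
            body.outputs.size() <= dataOperands.size() &&
            allMatch(body.outputs,
                     std::vector<Operand>(dataOperands.begin(),
                                          dataOperands.begin() + body.outputs.size()));
        return condOk && bodyOk;
    }

    }  // namespace sketch
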
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index 7220e37..9cffc7a 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -303,15 +303,18 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
- * TENSOR_INT32.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Must be 0 when not applicable to an operand type.
+ *
+ * See {@link OperandType}.
*/
int32_t zeroPoint;