NNAPI: Add state outputs for {UNI|BI}DIRECTIONAL_SEQUENCE_{RNN|LSTM}
Fix: 138443991
Test: NNTest_static and VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I0a3af22826d438f6f38de1f64042a50f98265e2d
diff --git a/current.txt b/current.txt
index 62b9877..f841866 100644
--- a/current.txt
+++ b/current.txt
@@ -673,7 +673,7 @@
2fa3679ad7c94b5e88724adcd560c561041068a4ca565c63830e68101988746a android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
237b23b126a66f3432658020fed78cdd06ba6297459436fe6bae0ba753370833 android.hardware.neuralnetworks@1.3::IPreparedModel
0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-abbc4e1a969881c9f8ab587add5b5e75b08df834c9c969c013ae38cb4bb16f6a android.hardware.neuralnetworks@1.3::types
+2fabd246f985d94a0172dacefb0d6cf19e2aeb2d5f17752653988ef39570a52d android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
c67aaf26a7a40d14ea61e70e20afacbd0bb906df1704d585ac8599fbb69dd44b android.hardware.wifi.hostapd@1.2::IHostapd
2b5a7ea572b736030c64a3b4043af244425477c4672301780fe15aba5ed393d9 android.hardware.wifi.hostapd@1.2::types
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index c5dc08c..530f984 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -2556,6 +2556,30 @@
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, bw_output_size]
* If batch-major: [batch_size, max_time, bw_output_size]
+ * * 2: The forward activation state output.
+ * A 2-D tensor of shape [batch_size, fw_output_size] containing an
+ * activation state from the last time step in the sequence. This
+ * output is optional and can be omitted. If this output is present
+ * then outputs 3-5 must be present as well.
+ * Available since HAL version 1.3.
+ * * 3: The forward cell state output.
+ * A 2-D tensor of shape [batch_size, fw_cell_size] containing a cell
+ * state from the last time step in the sequence. This output is
+ * optional and can be omitted. If this output is present then
+ * outputs 2, 4, and 5 must be present as well.
+ * Available since HAL version 1.3.
+ * * 4: The backward activation state output.
+ * A 2-D tensor of shape [batch_size, bw_output_size] containing an
+ * activation state from the last time step in the sequence. This
+ * output is optional and can be omitted. If this output is present
+ * then outputs 2, 3, 5 must be present as well.
+ * Available since HAL version 1.3.
+ * * 5: The backward cell state output.
+ * A 2-D tensor of shape [batch_size, bw_cell_size] containing a cell
+ * state from the last time step in the sequence. This output is
+ * optional and can be omitted. If this output is present then
+ * outputs 2-4 must be present as well.
+ * Available since HAL version 1.3.
*/
BIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_LSTM,
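The four state outputs documented above form an all-or-nothing group: each one's description requires the other three to be present. As a rough illustration of that constraint, here is a minimal, hypothetical C++ helper (not part of the HAL or the VTS sources):

    // Hypothetical sketch of the rule documented for outputs 2-5 of
    // BIDIRECTIONAL_SEQUENCE_LSTM: the optional state outputs must be
    // provided together or omitted together.
    #include <cassert>

    bool StateOutputsConsistent(bool fwActivationState, bool fwCellState,
                                bool bwActivationState, bool bwCellState) {
        const bool all =
                fwActivationState && fwCellState && bwActivationState && bwCellState;
        const bool none =
                !fwActivationState && !fwCellState && !bwActivationState && !bwCellState;
        return all || none;
    }

    int main() {
        assert(StateOutputsConsistent(true, true, true, true));      // all present: valid
        assert(StateOutputsConsistent(false, false, false, false));  // all omitted: valid
        assert(!StateOutputsConsistent(true, true, false, true));    // partial: invalid
        return 0;
    }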
@@ -2673,6 +2697,18 @@
* (timeMajor). If it is set to true, then the shape is set to
* [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
* [batchSize, maxTime, bwNumUnits].
+ * * 2: The forward hidden state output.
+ * A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
+ * state from the last time step in the sequence. This output is
+ * optional and can be omitted. If this output is present then output
+ * 3 must be present as well.
+ * Available since HAL version 1.3.
+ * * 3: The backward hidden state output.
+ * A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
+ * state from the last time step in the sequence. This output is
+ * optional and can be omitted. If this output is present then output
+ * 2 must be present as well.
+ * Available since HAL version 1.3.
*/
BIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_RNN,
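The new hidden-state outputs keep a fixed [batchSize, numUnits] layout regardless of the timeMajor setting, while the sequence outputs swap their leading dimensions. A small hypothetical sketch (not part of the HAL) of deriving the backward state shape from the backward sequence output shape:

    // Hypothetical sketch: derive the shape of the backward hidden state
    // output (output 3) from the backward sequence output (output 1) of
    // BIDIRECTIONAL_SEQUENCE_RNN.
    #include <array>
    #include <cstdint>

    std::array<uint32_t, 2> BwHiddenStateShape(
            const std::array<uint32_t, 3>& bwSequenceShape, bool timeMajor) {
        // The sequence output is [maxTime, batchSize, bwNumUnits] when
        // time-major and [batchSize, maxTime, bwNumUnits] otherwise; the
        // state output is always [batchSize, bwNumUnits].
        const uint32_t batchSize = timeMajor ? bwSequenceShape[1] : bwSequenceShape[0];
        const uint32_t bwNumUnits = bwSequenceShape[2];
        return {batchSize, bwNumUnits};
    }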
@@ -4656,6 +4692,15 @@
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, output_size]
* If batch-major: [batch_size, max_time, output_size]
+ * * 1: A tensor of shape [batch_size, output_size] containing a hidden
+ * state from the last time step in the sequence. This output is
+ * optional and can be omitted. If this output is present then
+ * output #2 must be present as well.
+ * Available since HAL version 1.3.
+ * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
+ * from the last time step in the sequence. This output is optional
+ * and can be omitted.
+ * Available since HAL version 1.3.
*/
UNIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_LSTM,
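Because output 1 may only be present when output 2 is present as well, UNIDIRECTIONAL_SEQUENCE_LSTM ends up declaring either just the sequence output or all three outputs. A hypothetical count check mirroring that rule (illustration only, not validator code):

    // Hypothetical sketch: consistent output counts for
    // UNIDIRECTIONAL_SEQUENCE_LSTM under the documentation above. The hidden
    // state (output 1) requires the cell state (output 2), so the only
    // consistent counts are 1 and 3.
    #include <cstdint>

    bool ValidUnidirectionalLstmOutputCount(uint32_t outputCount) {
        return outputCount == 1 || outputCount == 3;
    }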
@@ -4711,6 +4756,10 @@
* it is set to 1, then the output has a shape [maxTime, batchSize,
* numUnits], otherwise the output has a shape [batchSize, maxTime,
* numUnits].
+ * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
+ * from the last time step in the sequence. This output is optional
+ * and can be omitted.
+ * Available since HAL version 1.3.
*/
UNIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_RNN,
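On the driver side, whether the caller asked for the final hidden state can be read off the operation's output count. A framework-free sketch, with a plain struct standing in for the HAL Operation type (hypothetical, for illustration only):

    // Hypothetical sketch: deciding whether UNIDIRECTIONAL_SEQUENCE_RNN should
    // also write back the [batchSize, numUnits] hidden state from the last
    // time step. The Operation struct below is a stand-in, not the HIDL type.
    #include <cstdint>
    #include <vector>

    struct Operation {
        std::vector<uint32_t> inputs;   // operand indices
        std::vector<uint32_t> outputs;  // operand indices
    };

    bool WantsFinalHiddenState(const Operation& op) {
        // Output 0 is the sequence output; output 1, when declared, is the
        // optional hidden-state output added in HAL version 1.3.
        return op.outputs.size() > 1;
    }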
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 0a35e2d..b9ea430 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -527,9 +527,15 @@
}
}
}
- // BIDIRECTIONAL_SEQUENCE_LSTM and BIDIRECTIONAL_SEQUENCE_RNN can have either one or two
- // outputs depending on their mergeOutputs parameter.
- if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_LSTM ||
+ // BIDIRECTIONAL_SEQUENCE_LSTM and BIDIRECTIONAL_SEQUENCE_RNN can have a
+ // variable number of outputs depending on their mergeOutputs parameter
+ // and on whether the optional state outputs are provided.
+ // UNIDIRECTIONAL_SEQUENCE_LSTM can have either one or three outputs and
+ // UNIDIRECTIONAL_SEQUENCE_RNN either one or two outputs, depending on
+ // whether the optional state outputs are provided.
+ if (operation.type == OperationType::UNIDIRECTIONAL_SEQUENCE_LSTM ||
+ operation.type == OperationType::UNIDIRECTIONAL_SEQUENCE_RNN ||
+ operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_LSTM ||
operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) {
for (const size_t outOprand : operation.outputs) {
if (operand == outOprand) {
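The skip above exists because these operations now accept several output counts: a model from which one of these outputs has been removed may still validate, so the negative test cannot expect a failure. A hypothetical sketch of that reasoning (the counts shown are assumptions based on the documentation above, not code from the test):

    // Hypothetical sketch: with optional state outputs, removing one output
    // from a recurrent operation can leave it at another valid count, e.g.
    // UNIDIRECTIONAL_SEQUENCE_RNN going from 2 outputs back down to 1.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    bool RemovalMayStillValidate(const std::vector<uint32_t>& validCounts,
                                 uint32_t currentCount) {
        return std::find(validCounts.begin(), validCounts.end(), currentCount - 1) !=
               validCounts.end();
    }

    // Example: assuming {1, 2} are the valid counts for
    // UNIDIRECTIONAL_SEQUENCE_RNN, a 2-output instance cannot be assumed
    // invalid after dropping an output: RemovalMayStillValidate({1, 2}, 2) == true.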