diff --git a/current.txt b/current.txt
index 69f4164..a08f301 100644
--- a/current.txt
+++ b/current.txt
@@ -574,11 +574,11 @@
# ABI preserving changes to HALs during Android R
b69a7615c508acf5c5201efd1bfa3262167874fc3594e2db5a3ff93addd8ac75 android.hardware.keymaster@4.0::IKeymasterDevice
eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
-f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardware.neuralnetworks@1.0::types
+8eac60e1f724d141c71c69f06d4544acb720a55dfbbcd97fa01bb3d25ee4e2f5 android.hardware.neuralnetworks@1.0::types
5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-2d5483fbf59d5fd2de94665a6df05da5c3d09de67561d0db5e9f09e59e9aea46 android.hardware.neuralnetworks@1.2::types
+7f7ef383268c95a1b8fe4e55c662bc806bb0ac11a154f6b049a113a44b0f024f android.hardware.neuralnetworks@1.2::types
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -595,18 +595,18 @@
adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
7a04ea5595ed418ca3e91c28b8bd7353dd988be9be7b0c8c9e64fb4b77bd4523 android.hardware.keymaster@4.1::types
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
-4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
+258825966435b3ed08832055bb736d81516013e405f161d9ccde9a90cfcdde83 android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-cf1d55e8c68300090747ab90b94c22e4c859b29c84ced68a317c595bb115eab2 android.hardware.neuralnetworks@1.3::types
+35668befe89fc7f84d58fc1dab7dd3e4d6067c7eeccbae154fe36cd964dfaef7 android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
619fc9839ec6e369cfa9b28e3e9412e6885720ff8f9b5750c1b6ffb905120391 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
c9273429fcf98d797d3bb07fdba6f1be95bf960f9255cde169fd1ca4db85f856 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
9b0a3ab6f4f74b971ed094426d8a443e29b512ff03e1ab50c07156396cdb2483 android.hardware.wifi.supplicant@1.3::types
-b91398c7475d9f6decb760f6e4721bab8bd588b6d36115d3048ebbfdf70ccf7b android.hardware.radio@1.5::types
+17f92261051ee7f08662a9d09a4a7af629551194018ff65855b3740fa22b6094 android.hardware.radio@1.5::types
5ae0401fdaad9b85de7bebc5bdd7388a4ea661c46f1e4929341981b0540c67de android.hardware.radio@1.5::IRadio
-3afac66f21a33bc9c4b80481c7d5540038348651d9a7d8af64ea13610af138da android.hardware.radio@1.5::IRadioIndication
+bc59237dbd93949238081f762710552e76670cb648c0e198138551460ac54b1e android.hardware.radio@1.5::IRadioIndication
f4888f9676890b43a459c6380f335fea7a6ad32ed3bafafeb018a88d6c0be8a4 android.hardware.radio@1.5::IRadioResponse
55f0a15642869ec98a55ea0a5ac049d3e1a6245ff7750deb6bcb7182057eee83 android.hardware.radio.config@1.3::types
b27ab0cd40b0b078cdcd024bfe1061c4c4c065f3519eeb9347fa359a3268a5ae android.hardware.radio.config@1.3::IRadioConfig
diff --git a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
index 194c438..66132ad 100644
--- a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -2607,8 +2607,10 @@
.Padding(PaddingMode::NONE));
ASSERT_EQ(ErrorCode::OK, err) << "Got " << err;
- err = Begin(KeyPurpose::DECRYPT,
- AuthorizationSetBuilder().BlockMode(BlockMode::GCM).Padding(PaddingMode::NONE));
+ err = Begin(KeyPurpose::DECRYPT, AuthorizationSetBuilder()
+ .BlockMode(BlockMode::GCM)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_MAC_LENGTH, 128));
EXPECT_EQ(ErrorCode::INCOMPATIBLE_PURPOSE, err) << "Got " << err;
CheckedDeleteKey();
@@ -2621,8 +2623,10 @@
.Authorization(TAG_MIN_MAC_LENGTH, 128)
.Padding(PaddingMode::NONE)));
- err = Begin(KeyPurpose::ENCRYPT,
- AuthorizationSetBuilder().BlockMode(BlockMode::GCM).Padding(PaddingMode::NONE));
+ err = Begin(KeyPurpose::ENCRYPT, AuthorizationSetBuilder()
+ .BlockMode(BlockMode::GCM)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_MAC_LENGTH, 128));
EXPECT_EQ(ErrorCode::INCOMPATIBLE_PURPOSE, err) << "Got " << err;
}
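
For context on the test fix above: GCM is an AEAD mode, and Keymaster requires the tag length when an operation begins, so a GCM Begin() without Tag::MAC_LENGTH would typically fail parameter validation first (ErrorCode::MISSING_MAC_LENGTH in Keymaster 4.0), masking the purpose error these tests target. A hedged sketch of the resulting pattern, reusing the VTS helpers visible above; 128 is the tag length in bits:

    // Sketch, not part of the patch: supplying the MAC length lets Begin()
    // get past parameter validation, so an HMAC key used with AES-GCM
    // parameters can surface the intended INCOMPATIBLE_PURPOSE error.
    auto gcmParams = AuthorizationSetBuilder()
                             .BlockMode(BlockMode::GCM)
                             .Padding(PaddingMode::NONE)
                             .Authorization(TAG_MAC_LENGTH, 128);
    EXPECT_EQ(ErrorCode::INCOMPATIBLE_PURPOSE, Begin(KeyPurpose::DECRYPT, gcmParams));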
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index ba9d068..1175a30 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -261,8 +261,8 @@
* filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -290,7 +290,8 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
* the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the implicit
@@ -355,8 +356,8 @@
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -384,8 +385,8 @@
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the implicit
@@ -492,8 +493,6 @@
*
* Supported value tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_INT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported value tensor rank: from 2
*
@@ -556,10 +555,10 @@
* of output nodes.
* * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
* tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
* invoke on the result.
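
The bias rule repeated throughout these comments (a TENSOR_INT32 bias with zeroPoint of 0 and bias_scale == input_scale * filter_scale) follows from the affine scheme real = scale * (quantized - zeroPoint): every input*filter product already sits on the scale input_scale * filter_scale, so a bias quantized to that same scale adds directly into the int32 accumulator. A small illustrative helper, hypothetical rather than part of the HAL:

    #include <cmath>
    #include <cstdint>

    // Quantize a real-valued bias for a layer whose input and filter are
    // TENSOR_QUANT8_ASYMM: the bias must be TENSOR_INT32 with zeroPoint == 0
    // and bias_scale == input_scale * filter_scale, as the docs above state.
    int32_t quantizeBias(float realBias, float inputScale, float filterScale) {
        const float biasScale = inputScale * filterScale;
        return static_cast<int32_t>(std::lround(realBias / biasScale));
    }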
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index b111d96..e867120 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -375,8 +375,8 @@
* must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -425,7 +425,8 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
* or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -523,8 +524,8 @@
* must be set to 3.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -569,8 +570,8 @@
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -705,8 +706,8 @@
*
* Supported value tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_INT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_INT32} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
*
* Supported value tensor rank: from 2
*
@@ -772,10 +773,10 @@
* of output nodes.
* * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
* tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
* invoke on the result.
@@ -2659,7 +2660,8 @@
* order of the boxes corresponds with input0. For input0 of type
* {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
* {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
- * scale of 0.125. Zero num_rois is supported for this tensor.
+ * scale of 0.125.
+ * Zero num_rois is supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
@@ -2686,6 +2688,7 @@
* [num_output_rois], specifying the score of each output box. The boxes
* are grouped by batches, but the sequential order in each batch is not
-     *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM}
* the scale and zero point must be the same as input0.
* * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
* [num_output_rois, 4], specifying the coordinates of each
@@ -2703,7 +2706,7 @@
BOX_WITH_NMS_LIMIT = 44,
/**
- * Casts a tensor to a new type.
+ * Casts a tensor to a type.
*
     * This operation ignores the scale and zeroPoint of quantized tensors,
* e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
@@ -3141,8 +3144,8 @@
* {@link SymmPerChannelQuantParams}) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale. For filter tensor
* of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3181,7 +3184,8 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
-     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale. For filter tensor
* of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3661,21 +3665,24 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
- * the scale and zeroPoint can be diffent from the input0 scale and zeroPoint.
+     *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
*/
PRELU = 71,
/**
* Quantizes the input tensor.
*
- * The formula is:
+ * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
*
     *     output = max(0, min(255, round(input / scale) + zeroPoint))
*
- * Supported tensor {@link OperandType}:
+ * Supported input tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
*
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: from 1
*
* Inputs:
@@ -4325,15 +4332,15 @@
* dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
- * same type. For input tensor of type
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale. For filter tensor of
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
- * to bias_scale[i] = input_scale * filter_scale[i].
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the
+ * same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 4: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -4363,14 +4370,14 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
- * same type. For input tensor of type
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale. For filter tensor of
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
- * to bias_scale[i] = input_scale * filter_scale[i].
+ * same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
* tensor shape.
* * 4: An {@link OperandType::INT32} scalar, specifying the implicit
diff --git a/neuralnetworks/1.3/IPreparedModel.hal b/neuralnetworks/1.3/IPreparedModel.hal
index c04809f..7aea416 100644
--- a/neuralnetworks/1.3/IPreparedModel.hal
+++ b/neuralnetworks/1.3/IPreparedModel.hal
@@ -18,9 +18,11 @@
import @1.0::ErrorStatus;
import @1.0::Request;
-import @1.2::MeasureTiming;
import @1.2::IExecutionCallback;
import @1.2::IPreparedModel;
+import @1.2::MeasureTiming;
+import @1.2::OutputShape;
+import @1.2::Timing;
/**
* IPreparedModel describes a model that has been prepared for execution and
@@ -62,8 +64,8 @@
* values, the execution should complete successfully (ErrorStatus::NONE):
* There must be no failure unless the device itself is in a bad state.
*
- * Any number of calls to the execute, execute_1_2, execute_1_3, and executeSynchronously
- * functions, in any combination, may be made concurrently, even on the same
+ * Any number of calls to the execute* and executeSynchronously* functions,
+ * in any combination, may be made concurrently, even on the same
* IPreparedModel object.
*
* @param request The input and output information on which the prepared
@@ -87,4 +89,60 @@
*/
execute_1_3(Request request, MeasureTiming measure, IExecutionCallback callback)
generates (ErrorStatus status);
+
+ /**
+ * Performs a synchronous execution on a prepared model.
+ *
+ * The execution is performed synchronously with respect to the caller.
+ * executeSynchronously_1_3 must verify the inputs to the function are
+ * correct. If there is an error, executeSynchronously_1_3 must immediately
+ * return with the appropriate ErrorStatus value. If the inputs to the
+ * function are valid and there is no error, executeSynchronously_1_3 must
+ * perform the execution, and must not return until the execution is
+ * complete.
+ *
+ * The caller must not change the content of any data object referenced by
+ * 'request' (described by the {@link @1.0::DataLocation} of a
+ * {@link @1.0::RequestArgument}) until executeSynchronously_1_3
+ * returns. executeSynchronously_1_3 must not change the content of any of the
+ * data objects corresponding to 'request' inputs.
+ *
+ * If the prepared model was prepared from a model wherein all tensor
+ * operands have fully specified dimensions, and the inputs to the function
+ * are valid, and at execution time every operation's input operands have
+ * legal values, then the execution should complete successfully
+ * (ErrorStatus::NONE): There must be no failure unless the device itself is
+ * in a bad state.
+ *
+ * Any number of calls to the execute* and executeSynchronously* functions,
+ * in any combination, may be made concurrently, even on the same
+ * IPreparedModel object.
+ *
+ * @param request The input and output information on which the prepared
+ * model is to be executed.
+ * @param measure Specifies whether or not to measure duration of the execution.
+ * The duration runs from the time the driver sees the call
+ * to the executeSynchronously_1_3 function to the time the driver
+ * returns from the function.
+ * @return status Error status of the execution, must be:
+ * - NONE if execution is performed successfully
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if at least one output
+ * operand buffer is not large enough to store the
+ * corresponding output
+ * - INVALID_ARGUMENT if one of the input arguments is
+ * invalid
+ * @return outputShapes A list of shape information of model output operands.
+ * The index into "outputShapes" corresponds to the index
+ * of the output operand in the Request outputs vector.
+ * outputShapes must be empty unless the status is either
+ * NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @return timing Duration of execution. Unless measure is YES and status is
+ * NONE, all times must be reported as UINT64_MAX. A driver may
+ * choose to report any time as UINT64_MAX, indicating that
+ * measurement is not available.
+ */
+ executeSynchronously_1_3(Request request, MeasureTiming measure)
+ generates (ErrorStatus status, vec<OutputShape> outputShapes, Timing timing);
};
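
For orientation, a hedged sketch of driving the new method through the generated C++ proxy; preparedModel (a V1_3::IPreparedModel handle) and request (a populated V1_0::Request) are assumed to exist, and the multi-value generates clause surfaces as a callback in the C++ bindings:

    // Sketch only; error handling trimmed.
    using android::hardware::hidl_vec;
    using namespace android::hardware::neuralnetworks;

    V1_0::ErrorStatus status;
    hidl_vec<V1_2::OutputShape> shapes;
    V1_2::Timing timing;
    auto ret = preparedModel->executeSynchronously_1_3(
            request, V1_2::MeasureTiming::YES,
            [&](V1_0::ErrorStatus s, const hidl_vec<V1_2::OutputShape>& os,
                const V1_2::Timing& t) {
                status = s;
                shapes = os;
                timing = t;
            });
    if (!ret.isOk()) {
        // Transport-level failure (e.g. driver process death), distinct from
        // any ErrorStatus the driver reports through the callback.
    }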
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 84c4813..b70bd1b 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -110,6 +110,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
* * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -123,11 +124,13 @@
* * 2: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
* invoke on the result.
+ * For a {@link OperandType::TENSOR_INT32} tensor,
+ * the {@link FusedActivationFunc} must be "NONE".
*
* Outputs:
* * 0: The sum, a tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
- * {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
ADD = @1.2::OperationType:ADD,
@@ -293,6 +296,18 @@
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
* * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
*
+ * Available since HAL version 1.3:
+ * * Quantized signed (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
@@ -313,8 +328,9 @@
* must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -363,7 +379,9 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
* or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -443,6 +461,18 @@
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
* * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
*
+ * Available since HAL version 1.3:
+ * * Quantized signed (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
@@ -461,8 +491,9 @@
* must be set to 3.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -507,8 +538,9 @@
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32}
- * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale.
* For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -569,6 +601,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -589,7 +622,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape [batch, height*block_size,
* width*block_size, depth/(block_size*block_size)].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
DEPTH_TO_SPACE = @1.2::OperationType:DEPTH_TO_SPACE,
@@ -605,6 +639,7 @@
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
* * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
* * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported output tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
@@ -642,9 +677,11 @@
* and an error must be reported.
*
* Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.3)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_INT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_INT32} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported value tensor rank: from 2
*
@@ -658,7 +695,8 @@
* * 0: A n-D tensor with the same rank and shape as the Values
* tensor, except for the first dimension which has the same size
* as Lookups' only dimension.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input1.
*/
EMBEDDING_LOOKUP = @1.2::OperationType:EMBEDDING_LOOKUP,
@@ -693,6 +731,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4.
*
@@ -710,10 +749,11 @@
* of output nodes.
* * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
* tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
* invoke on the result.
@@ -798,6 +838,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
* Tensors with rank less than 4 are only supported since HAL version 1.2.
@@ -814,6 +855,8 @@
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 128 and the zeroPoint must be 128.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the scale must be 1.f / 128 and the zeroPoint must be 0.
*/
L2_NORMALIZATION = @1.2::OperationType:L2_NORMALIZATION,
@@ -1507,6 +1550,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -1549,7 +1593,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
RESIZE_BILINEAR = @1.2::OperationType:RESIZE_BILINEAR,
@@ -1624,6 +1669,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4.
* Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
@@ -1632,9 +1678,10 @@
* * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
* Since HAL version 1.2, this tensor may be zero-sized.
* * 1: A scalar, specifying the positive scaling factor for the exponent,
- * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
- * {@link OperandType::FLOAT32}.
+ * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32},
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
+ * must be of {@link OperandType::FLOAT32}.
* If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
* scalar must be of {@link OperandType::FLOAT16}.
* * 2: An optional {@link OperandType::INT32} scalar, default to -1,
@@ -1647,6 +1694,8 @@
* * 0: The output tensor of same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 256 and the zeroPoint must be 0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the scale must be 1.f / 256 and the zeroPoint must be -128.
*/
SOFTMAX = @1.2::OperationType:SOFTMAX,
@@ -1668,6 +1717,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -1688,7 +1738,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, height/block_size,
* width/block_size, depth_in*block_size*block_size].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
SPACE_TO_DEPTH = @1.2::OperationType:SPACE_TO_DEPTH,
@@ -1812,6 +1863,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -1830,7 +1882,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
BATCH_TO_SPACE_ND = @1.2::OperationType:BATCH_TO_SPACE_ND,
@@ -1925,6 +1978,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
* (full support since HAL version 1.2, see the output section)
*
* Supported tensor rank: up to 4
@@ -1947,7 +2001,8 @@
* of the padding:
* output0.dimension[i] =
* padding[i, 0] + input0.dimension[i] + padding[i, 1]
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*
* NOTE: Before HAL version 1.2, the pad value for
@@ -1971,6 +2026,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
* (full support since HAL version 1.2, see the output section)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
@@ -1998,7 +2054,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*
* NOTE: Before HAL version 1.2, the pad value for
@@ -2151,6 +2208,7 @@
* * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -2162,7 +2220,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
TRANSPOSE = @1.2::OperationType:TRANSPOSE,
@@ -2192,6 +2251,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -2216,6 +2276,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -2257,7 +2318,8 @@
* and height, dw and dh is the log-scale relative correction factor
* for the width and height. For input0 of type
* {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
* supported for this tensor.
     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
@@ -2612,6 +2674,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Inputs:
* * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
@@ -2623,7 +2686,11 @@
* order of the boxes corresponds with input0. For input0 of type
* {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
* {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
- * scale of 0.125. Zero num_rois is supported for this tensor.
+ * scale of 0.125.
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of -128 and scale of 0.125.
+ * Zero num_rois is supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
@@ -2650,6 +2717,8 @@
* [num_output_rois], specifying the score of each output box. The boxes
* are grouped by batches, but the sequential order in each batch is not
-     *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * or {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the scale and zero point must be the same as input0.
* * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
* [num_output_rois, 4], specifying the coordinates of each
@@ -2667,7 +2736,7 @@
BOX_WITH_NMS_LIMIT = @1.2::OperationType:BOX_WITH_NMS_LIMIT,
/**
- * Casts a tensor to a new type.
+ * Casts a tensor to a type.
*
     * This operation ignores the scale and zeroPoint of quantized tensors,
* e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
@@ -2678,6 +2747,14 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Since HAL version 1.3, casting tensors of the following
+ * {@link OperandType} to the same {@link OperandType} is supported:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT16_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT16_SYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+ * * {@link OperandType::TENSOR_QUANT8_SYMM}
*
* Supported tensor rank: from 1
*
@@ -2708,6 +2785,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -2722,7 +2800,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
CHANNEL_SHUFFLE = @1.2::OperationType:CHANNEL_SHUFFLE,
@@ -2816,6 +2895,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -2861,6 +2941,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -2872,7 +2953,8 @@
* Outputs:
* * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
* input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
EXPAND_DIMS = @1.2::OperationType:EXPAND_DIMS,
@@ -2896,6 +2978,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -2910,7 +2993,8 @@
*
* Outputs:
* * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
GATHER = @1.2::OperationType:GATHER,
@@ -2931,6 +3015,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Inputs:
* * 0: A 4-D Tensor specifying the score of each anchor at each
@@ -2948,11 +3033,13 @@
* dimensions is the channel dimension.
* * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
* predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
* {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
* * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
* each image in the batch, with format [image_height, image_width].
- * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, this
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this
* tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
* scale of 0.125.
* * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
@@ -2979,7 +3066,8 @@
* [num_output_rois], specifying the score of each output box.
* The boxes are grouped by batches, but the sequential order in
* each batch is not guaranteed. For type of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scale and zero
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
* point must be the same as input0.
* * 1: A tensor of the same {@link OperandType} as input3, of shape
* [num_output_rois, 4], specifying the coordinates of each output
@@ -3002,6 +3090,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -3025,6 +3114,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -3081,12 +3171,23 @@
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
+ * * Quantized signed (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
* * Quantized with symmetric per channel quantization for the filter:
* * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
* * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
* * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
*
+ * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
@@ -3105,8 +3206,9 @@
* {@link SymmPerChannelQuantParams}) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale. For filter tensor
* of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3145,7 +3247,9 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
-     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
- * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
* of 0 and bias_scale == input_scale * filter_scale. For filter tensor
* of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3170,7 +3274,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
GROUPED_CONV_2D = @1.2::OperationType:GROUPED_CONV_2D,
@@ -3190,6 +3295,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -3206,13 +3312,18 @@
* {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
* be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
* of 0 and scale of 0.125.
+ * For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
+ * should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with
+ * zeroPoint of -128 and scale of 0.125.
* * 2: An {@link OperandType::BOOL} scalar, set to true to specify
* NCHW data layout for input0. Set to false for NHWC.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0, with shape
* [num_boxes, num_keypoints], specifying score of the keypoints.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from input0 scale and zeroPoint.
* * 1: A tensor of the same {@link OperandType} as input1, with shape
* [num_boxes, num_keypoints, 2], specifying the location of
@@ -3283,6 +3394,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -3307,6 +3419,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -3434,6 +3547,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1.
*
@@ -3446,7 +3560,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
MAXIMUM = @1.2::OperationType:MAXIMUM,
@@ -3459,6 +3574,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1.
*
@@ -3471,7 +3587,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
MINIMUM = @1.2::OperationType:MINIMUM,
@@ -3503,6 +3620,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -3526,6 +3644,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: up to 4
*
@@ -3543,7 +3662,8 @@
* pad value must be of {@link OperandType::FLOAT16}.
* For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
* pad value must be of {@link OperandType::FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* the pad value must be of {@link OperandType::INT32}. The
* scale and zeroPoint are assumed to be the same as in input0.
*
@@ -3555,7 +3675,8 @@
* of the padding:
* output0.dimension[i] =
* padding[i, 0] + input0.dimension[i] + padding[i, 1]
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
PAD_V2 = @1.2::OperationType:PAD_V2,
@@ -3614,6 +3735,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -3624,22 +3746,32 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
- * the scale and zeroPoint can be diffent from the input0 scale and zeroPoint.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+     *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
*/
PRELU = @1.2::OperationType:PRELU,
/**
* Quantizes the input tensor.
*
- * The formula is:
+ * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
*
     *     output = max(0, min(255, round(input / scale) + zeroPoint))
*
- * Supported tensor {@link OperandType}:
+ * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} output
+ * tensor is:
+ *
+     *     output = max(-128, min(127, round(input / scale) + zeroPoint))
+ *
+ * Supported input tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
*
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
* Supported tensor rank: from 1
*
* Inputs:
@@ -3647,7 +3779,8 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0, but with
- * {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}.
*/
QUANTIZE = @1.2::OperationType:QUANTIZE,
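The two clamping formulas above differ only in the representable range. A minimal C++ sketch of both (illustrative; helper names are hypothetical):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // TENSOR_QUANT8_ASYMM: clamp to [0, 255].
    uint8_t quantizeAsymm(float input, float scale, int32_t zeroPoint) {
        const int32_t q = static_cast<int32_t>(std::round(input / scale)) + zeroPoint;
        return static_cast<uint8_t>(std::max(0, std::min(255, q)));
    }

    // TENSOR_QUANT8_ASYMM_SIGNED (since HAL version 1.3): clamp to [-128, 127].
    int8_t quantizeAsymmSigned(float input, float scale, int32_t zeroPoint) {
        const int32_t q = static_cast<int32_t>(std::round(input / scale)) + zeroPoint;
        return static_cast<int8_t>(std::max(-128, std::min(127, q)));
    }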
@@ -3955,6 +4088,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -3993,7 +4127,8 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0. The output
* shape is [num_rois, out_height, out_width, depth].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from the input0 scale and zeroPoint.
*/
ROI_ALIGN = @1.2::OperationType:ROI_ALIGN,
@@ -4014,6 +4149,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -4024,7 +4160,8 @@
* * 0: A 4-D tensor, specifying the feature map.
* * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
* the regions of interest, each line with format [x1, y1, x2, y2].
- * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
- * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
* this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
* with zeroPoint of 0 and scale of 0.125.
* * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
@@ -4044,7 +4181,8 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0. The output
* shape is [num_rois, out_height, out_width, depth].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
ROI_POOLING = @1.2::OperationType:ROI_POOLING,
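As a worked example of the ROI coordinate encoding above: with zeroPoint 0 and scale 0.125, a TENSOR_QUANT16_ASYMM coordinate is stored at 1/8-pixel resolution. A minimal sketch (illustrative; the helper is hypothetical):

    #include <cmath>
    #include <cstdint>

    // Encode an ROI coordinate as TENSOR_QUANT16_ASYMM with zeroPoint 0
    // and scale 0.125: stored = round(pixels / 0.125).
    uint16_t encodeRoiCoord(float pixels) {
        return static_cast<uint16_t>(std::lround(pixels / 0.125f));
    }
    // e.g. x1 = 7.5 pixels is stored as 60; decoding gives 60 * 0.125 = 7.5.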
@@ -4133,6 +4271,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -4145,7 +4284,8 @@
*
* Outputs:
* * 0: An n-D tensor of the same type as the input containing the slice.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* its scale and zeroPoint have to be the same as the input0 scale and zeroPoint.
*/
SLICE = @1.2::OperationType:SLICE,
@@ -4158,6 +4298,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -4170,7 +4311,8 @@
*
* Outputs:
* * 0 ~ (num_splits - 1): Resulting subtensors.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
SPLIT = @1.2::OperationType:SPLIT,
@@ -4206,6 +4348,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -4216,7 +4359,8 @@
*
* Outputs:
* * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
TILE = @1.2::OperationType:TILE,
@@ -4232,6 +4376,7 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: from 1
*
@@ -4243,7 +4388,8 @@
* Outputs:
* * 0: An n-D tensor of the same type as the input, containing the k
* largest elements along each last dimensional slice.
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
* * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
* containing the indices of values within the last dimension of input.
@@ -4278,6 +4424,18 @@
* * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
* * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
*
+ * * Quantized signed (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
@@ -4295,15 +4453,16 @@
* dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
- * same type. For input tensor of type
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale. For filter tensor of
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
- * to bias_scale[i] = input_scale * filter_scale[i].
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the
+ * same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 4: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -4333,14 +4492,15 @@
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
- * same type. For input tensor of type
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale. For filter tensor of
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
- * to bias_scale[i] = input_scale * filter_scale[i].
+ * same type.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the bias should be of {@link OperandType::TENSOR_INT32},
+ * with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
* tensor shape.
* * 4: An {@link OperandType::INT32} scalar, specifying the implicit
@@ -4359,7 +4519,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
TRANSPOSE_CONV_2D = @1.2::OperationType:TRANSPOSE_CONV_2D,
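The per-channel bias rule above (bias_scale of 0 on the operand, with the actual scale of value 'i' equal to input_scale * filter_scale[i]) can be read as this minimal sketch (illustrative; the helper is hypothetical):

    #include <cstddef>
    #include <vector>

    // Effective per-channel bias scales for a
    // TENSOR_QUANT8_SYMM_PER_CHANNEL filter.
    std::vector<float> effectiveBiasScales(float inputScale,
                                           const std::vector<float>& filterScales) {
        std::vector<float> biasScales(filterScales.size());
        for (size_t i = 0; i < filterScales.size(); ++i) {
            biasScales[i] = inputScale * filterScales[i];  // bias_scale[i]
        }
        return biasScales;
    }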
@@ -4539,6 +4700,7 @@
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
@@ -4578,7 +4740,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
* the scale and zeroPoint must be the same as input0.
*/
RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index f61240e..e3c5376 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -215,7 +215,7 @@
hidl_vec<OutputShape>* outputShapes,
Timing* timing) {
ErrorStatus result;
- Return<void> ret = preparedModel->executeSynchronously(
+ Return<void> ret = preparedModel->executeSynchronously_1_3(
request, measure,
[&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
const Timing& time) {
@@ -452,7 +452,7 @@
EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
} break;
case TestKind::QUANTIZATION_COUPLING: {
- ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
+ ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
sp<IPreparedModel> preparedCoupledModel;
@@ -521,7 +521,7 @@
[](const TestModel& testModel) { return !testModel.expectFailure; });
INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
- return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
+ return testModel.hasQuant8CoupledOperands() && testModel.operations.size() == 1;
});
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
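For context on the coupling tests above: a TENSOR_QUANT8_ASYMM operand re-expressed as TENSOR_QUANT8_ASYMM_SIGNED keeps the same real values when the scale is unchanged and both the stored values and the zeroPoint shift by -128, since real = (stored - zeroPoint) * scale. A sketch of that mapping (illustrative; not the actual convertQuant8AsymmOperandsToSigned implementation):

    #include <cstdint>

    // Shift a stored uint8 value and its zeroPoint into the signed scheme.
    int8_t toSignedValue(uint8_t stored) {
        return static_cast<int8_t>(static_cast<int32_t>(stored) - 128);
    }
    int32_t toSignedZeroPoint(int32_t zeroPoint) { return zeroPoint - 128; }
    // (stored' - zeroPoint') * scale == (stored - zeroPoint) * scale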
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 242e12e..14ab897 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -323,13 +323,15 @@
// - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
// - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
// - DEQUANTIZE input can be any of
- // TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can
- // be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+ // TENSOR_(QUANT8_ASYMM|QUANT8_ASYMM_SIGNED|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL),
+ // output can be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
// - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32
// - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
// - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
// - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
// - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ // - AXIS_ALIGNED_BBOX_TRANSFORM bounding boxes (arg 1) can be of
+ // TENSOR_QUANT8_ASYMM or TENSOR_QUANT8_ASYMM_SIGNED.
switch (operation.type) {
case OperationType::LSH_PROJECTION: {
if (operand == operation.inputs[1]) {
@@ -340,7 +342,8 @@
case OperationType::ARGMAX:
case OperationType::ARGMIN: {
if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
- type == OperandType::TENSOR_INT32 || type == OperandType::TENSOR_QUANT8_ASYMM) {
+ type == OperandType::TENSOR_INT32 || type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
return true;
}
} break;
@@ -364,6 +367,7 @@
case OperationType::DEQUANTIZE: {
if (operand == operation.inputs[0] &&
(type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
type == OperandType::TENSOR_QUANT8_SYMM ||
type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
return true;
@@ -383,6 +387,13 @@
return true;
}
} break;
+ case OperationType::AXIS_ALIGNED_BBOX_TRANSFORM: {
+ if (operand == operation.inputs[1] &&
+ (type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) {
+ return true;
+ }
+ } break;
default:
break;
}
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 2cf30d5..8092d04 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -79,9 +79,9 @@
// synchronous
{
- SCOPED_TRACE(message + " [executeSynchronously]");
+ SCOPED_TRACE(message + " [executeSynchronously_1_3]");
- Return<void> executeStatus = preparedModel->executeSynchronously(
+ Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
request, measure,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
@@ -158,8 +158,8 @@
}
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
- SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
- Return<void> executeStatus = preparedModel->executeSynchronously(
+ SCOPED_TRACE("Expecting request to fail [executeSynchronously_1_3]");
+ Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
request, MeasureTiming::NO,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
ASSERT_NE(ErrorStatus::NONE, error);
diff --git a/radio/1.5/IRadioIndication.hal b/radio/1.5/IRadioIndication.hal
index 81452ab..879e9ad 100644
--- a/radio/1.5/IRadioIndication.hal
+++ b/radio/1.5/IRadioIndication.hal
@@ -30,4 +30,33 @@
* @param enabled whether uiccApplications are enabled, or disabled
*/
oneway uiccApplicationsEnablementChanged(RadioIndicationType type, bool enabled);
+
+ /**
+ * Report that Registration or a Location/Routing/Tracking Area update has failed.
+ *
+ * <p>Indicate whenever a registration procedure, including a location, routing, or tracking
+ * area update fails. This includes procedures that do not necessarily result in a change of
+ * the modem's registration status. If the modem's registration status changes, that is
+ * reflected in onNetworkStateChanged() and subsequent get{Voice/Data}RegistrationState() calls.
+ *
+ * @param cellIdentity the CellIdentity, which must include the globally unique identifier for
+ * the cell (for example, all components of the CGI or ECGI).
+ * @param chosenPlmn a 5 or 6 digit alphanumeric PLMN (MCC|MNC) among those broadcast by the
+ * cell that was chosen for the failed registration attempt.
+ * @param domain Domain::CS, Domain::PS, or both in case of a combined procedure.
+ * @param causeCode the primary failure cause code of the procedure.
+ * For GSM/UMTS (MM), values are in TS 24.008 Sec 10.5.95
+ * For GSM/UMTS (GMM), values are in TS 24.008 Sec 10.5.147
+ * For LTE (EMM), cause codes are TS 24.301 Sec 9.9.3.9
+ * For NR (5GMM), cause codes are TS 24.501 Sec 9.11.3.2
+ * MAX_INT if this value is unused.
+ * @param additionalCauseCode the cause code of any secondary/combined procedure if appropriate.
+ * For UMTS, if a combined attach succeeds for PS only, then the GMM cause code shall be
+ * included as an additionalCauseCode.
+ * For LTE (ESM), cause codes are in TS 24.301 9.9.4.4
+ * MAX_INT if this value is unused.
+ */
+ oneway registrationFailed(
+ RadioIndicationType type, CellIdentity cellIdentity, string chosenPlmn,
+ bitfield<Domain> domain, int32_t causeCode, int32_t additionalCauseCode);
};
diff --git a/radio/1.5/types.hal b/radio/1.5/types.hal
index 3c6e67a..2a2ba11 100644
--- a/radio/1.5/types.hal
+++ b/radio/1.5/types.hal
@@ -22,14 +22,22 @@
import @1.1::RadioAccessSpecifier;
import @1.1::ScanType;
import @1.1::UtranBands;
+import @1.2::CellIdentityCdma;
+import @1.2::CellIdentityGsm;
+import @1.2::CellIdentityWcdma;
+import @1.2::CellIdentityTdscdma;
+import @1.2::CellIdentityLte;
import @1.2::NetworkScanRequest;
import @1.4::AccessNetwork;
import @1.4::ApnTypes;
+import @1.4::CellIdentityNr;
import @1.4::DataCallFailCause;
import @1.4::DataConnActiveStatus;
import @1.4::DataProfileInfo;
import @1.4::PdpProtocolType;
+import android.hidl.safe_union@1.0::Monostate;
+
/**
* Defining signal strength type.
*/
@@ -394,3 +402,22 @@
int32_t mtu;
};
+
+enum Domain : int32_t {
+ /** Circuit-switched */
+ CS = 1 << 0,
+
+ /** Packet-switched */
+ PS = 1 << 1,
+};
+
+/** A union representing the CellIdentity of a single cell */
+safe_union CellIdentity {
+ Monostate noinit;
+
+ CellIdentityGsm gsm;
+ CellIdentityWcdma wcdma;
+ CellIdentityTdscdma tdscdma;
+ CellIdentityCdma cdma;
+ CellIdentityLte lte;
+ CellIdentityNr nr;
+};
diff --git a/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h b/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h
index ba11257..49a315d 100644
--- a/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h
+++ b/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h
@@ -739,6 +739,13 @@
Return<void> modemReset(RadioIndicationType type,
const ::android::hardware::hidl_string& reason);
+
+ Return<void> registrationFailed(
+ RadioIndicationType type,
+ const ::android::hardware::radio::V1_5::CellIdentity& cellIdentity,
+ const ::android::hardware::hidl_string& chosenPlmn,
+ ::android::hardware::hidl_bitfield<::android::hardware::radio::V1_5::Domain> domain,
+ int32_t causeCode, int32_t additionalCauseCode);
};
// Test environment for Radio HIDL HAL.
diff --git a/radio/1.5/vts/functional/radio_indication.cpp b/radio/1.5/vts/functional/radio_indication.cpp
index acffbbe..2416605 100644
--- a/radio/1.5/vts/functional/radio_indication.cpp
+++ b/radio/1.5/vts/functional/radio_indication.cpp
@@ -333,3 +333,12 @@
bool /*enabled*/) {
return Void();
}
+
+Return<void> RadioIndication_v1_5::registrationFailed(
+ RadioIndicationType /*type*/,
+ const ::android::hardware::radio::V1_5::CellIdentity& /*cellIdentity*/,
+ const ::android::hardware::hidl_string& /*chosenPlmn*/,
+ ::android::hardware::hidl_bitfield<::android::hardware::radio::V1_5::Domain> /*domain*/,
+ int32_t /*causeCode*/, int32_t /*additionalCauseCode*/) {
+ return Void();
+}
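As a usage note (a sketch, not part of this change): a vendor implementation might raise the new indication roughly as follows, passing MAX_INT for the unused secondary cause code as documented above; the PLMN and cause code are example values.

    #include <android/hardware/radio/1.5/IRadioIndication.h>
    #include <cstdint>

    using ::android::sp;
    using ::android::hardware::hidl_bitfield;
    using ::android::hardware::radio::V1_0::RadioIndicationType;
    using ::android::hardware::radio::V1_5::CellIdentity;
    using ::android::hardware::radio::V1_5::Domain;
    using ::android::hardware::radio::V1_5::IRadioIndication;

    // Hypothetical vendor-side helper: report a CS-domain registration failure.
    void reportCsRegistrationFailure(const sp<IRadioIndication>& ind,
                                     const CellIdentity& cellId) {
        ind->registrationFailed(RadioIndicationType::UNSOLICITED, cellId,
                                "310170",  // chosenPlmn (MCC|MNC)
                                static_cast<hidl_bitfield<Domain>>(Domain::CS),
                                /*causeCode=*/2,  // e.g. a TS 24.008 MM cause
                                /*additionalCauseCode=*/INT32_MAX);  // unused
    }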