Merge "VTS tests for IWifiStaIface@1.2." into pi-dev
diff --git a/bluetooth/a2dp/1.0/vts/functional/Android.bp b/bluetooth/a2dp/1.0/vts/functional/Android.bp
new file mode 100644
index 0000000..f1ffc45
--- /dev/null
+++ b/bluetooth/a2dp/1.0/vts/functional/Android.bp
@@ -0,0 +1,26 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+ name: "VtsHalBluetoothA2dpV1_0TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: ["VtsHalBluetoothA2dpV1_0TargetTest.cpp"],
+ static_libs: [
+ "android.hardware.bluetooth@1.0",
+ "android.hardware.bluetooth.a2dp@1.0",
+ "libbluetooth-types",
+ ],
+}
diff --git a/bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp b/bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp
new file mode 100644
index 0000000..1a0342f
--- /dev/null
+++ b/bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "bluetooth_a2dp_hidl_hal_test"
+
+#include <android-base/logging.h>
+#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioHost.h>
+#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioOffload.h>
+#include <hardware/bluetooth.h>
+#include <utils/Log.h>
+
+#include <VtsHalHidlTargetCallbackBase.h>
+#include <VtsHalHidlTargetTestBase.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
+
+using ::android::hardware::bluetooth::a2dp::V1_0::IBluetoothAudioHost;
+using ::android::hardware::bluetooth::a2dp::V1_0::IBluetoothAudioOffload;
+using ::android::hardware::bluetooth::a2dp::V1_0::Status;
+using ::android::hardware::bluetooth::a2dp::V1_0::CodecType;
+using ::android::hardware::bluetooth::a2dp::V1_0::SampleRate;
+using ::android::hardware::bluetooth::a2dp::V1_0::BitsPerSample;
+using ::android::hardware::bluetooth::a2dp::V1_0::ChannelMode;
+using ::android::hardware::bluetooth::a2dp::V1_0::CodecConfiguration;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+// Test environment for Bluetooth HIDL A2DP HAL.
+class BluetoothA2dpHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+ public:
+ // get the test environment singleton
+ static BluetoothA2dpHidlEnvironment* Instance() {
+ static BluetoothA2dpHidlEnvironment* instance = new BluetoothA2dpHidlEnvironment;
+ return instance;
+ }
+
+ virtual void registerTestServices() override { registerTestService<IBluetoothAudioOffload>(); }
+
+ private:
+ BluetoothA2dpHidlEnvironment() {}
+};
+
+// The main test class for Bluetooth A2DP HIDL HAL.
+class BluetoothA2dpHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+        // currently tests passthrough mode only
+ audio_offload = ::testing::VtsHalHidlTargetTestBase::getService<IBluetoothAudioOffload>(
+ BluetoothA2dpHidlEnvironment::Instance()->getServiceName<IBluetoothAudioOffload>());
+ ASSERT_NE(audio_offload, nullptr);
+
+ audio_host = new BluetoothAudioHost(*this);
+ ASSERT_NE(audio_host, nullptr);
+
+ codec.codecType = CodecType::AAC;
+ codec.sampleRate = SampleRate::RATE_44100;
+ codec.bitsPerSample = BitsPerSample::BITS_16;
+ codec.channelMode = ChannelMode::STEREO;
+ codec.encodedAudioBitrate = 320000;
+ codec.peerMtu = 1000;
+ }
+
+ virtual void TearDown() override {}
+
+ // A simple test implementation of IBluetoothAudioHost.
+ class BluetoothAudioHost
+ : public ::testing::VtsHalHidlTargetCallbackBase<BluetoothA2dpHidlTest>,
+ public IBluetoothAudioHost {
+ BluetoothA2dpHidlTest& parent_;
+
+ public:
+ BluetoothAudioHost(BluetoothA2dpHidlTest& parent) : parent_(parent){};
+ virtual ~BluetoothAudioHost() = default;
+
+ Return<void> startStream() override {
+ parent_.audio_offload->streamStarted(Status::SUCCESS);
+ return Void();
+ };
+
+ Return<void> suspendStream() override {
+ parent_.audio_offload->streamSuspended(Status::SUCCESS);
+ return Void();
+ };
+
+ Return<void> stopStream() override { return Void(); };
+ };
+
+ // audio_host is for the Audio HAL to send stream start/suspend/stop commands to Bluetooth
+ sp<IBluetoothAudioHost> audio_host;
+ // audio_offload is for the Bluetooth HAL to report session started/ended and handled audio
+ // stream started/suspended
+ sp<IBluetoothAudioOffload> audio_offload;
+ // codec is the currently used codec
+ CodecConfiguration codec;
+};
+
+// Empty test: service setup in SetUp() and teardown in TearDown() must succeed.
+TEST_F(BluetoothA2dpHidlTest, InitializeAndClose) {}
+
+// Test start and end session
+TEST_F(BluetoothA2dpHidlTest, StartAndEndSession) {
+ EXPECT_EQ(Status::SUCCESS, audio_offload->startSession(audio_host, codec));
+ audio_offload->endSession();
+}
+
+int main(int argc, char** argv) {
+ ::testing::AddGlobalTestEnvironment(BluetoothA2dpHidlEnvironment::Instance());
+ ::testing::InitGoogleTest(&argc, argv);
+ BluetoothA2dpHidlEnvironment::Instance()->init(&argc, argv);
+ int status = RUN_ALL_TESTS();
+ LOG(INFO) << "Test result = " << status;
+ return status;
+}
diff --git a/camera/provider/2.4/default/CameraProvider.cpp b/camera/provider/2.4/default/CameraProvider.cpp
index 8e37b26..6313939 100644
--- a/camera/provider/2.4/default/CameraProvider.cpp
+++ b/camera/provider/2.4/default/CameraProvider.cpp
@@ -298,7 +298,8 @@
return true;
}
- mPreferredHal3MinorVersion = property_get_int32("ro.camera.wrapper.hal3TrebleMinorVersion", 3);
+ mPreferredHal3MinorVersion =
+ property_get_int32("ro.vendor.camera.wrapper.hal3TrebleMinorVersion", 3);
ALOGV("Preferred HAL 3 minor version is %d", mPreferredHal3MinorVersion);
switch(mPreferredHal3MinorVersion) {
case 2:
diff --git a/compatibility_matrices/Android.mk b/compatibility_matrices/Android.mk
index 948b4fe..ee97433 100644
--- a/compatibility_matrices/Android.mk
+++ b/compatibility_matrices/Android.mk
@@ -34,30 +34,47 @@
LOCAL_MODULE := framework_compatibility_matrix.legacy.xml
LOCAL_MODULE_STEM := compatibility_matrix.legacy.xml
LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
-LOCAL_KERNEL_VERSIONS := 3.18.0 4.4.0 4.9.0
+LOCAL_KERNEL_VERSIONS := \
+ 3.18.0 \
+ 4.4.0 \
+ 4.9.0 \
+ 4.14.0 \
+
include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
include $(CLEAR_VARS)
LOCAL_MODULE := framework_compatibility_matrix.1.xml
LOCAL_MODULE_STEM := compatibility_matrix.1.xml
LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
-LOCAL_KERNEL_VERSIONS := 3.18.0 4.4.0 4.9.0
+LOCAL_KERNEL_VERSIONS := \
+ 3.18.0 \
+ 4.4.0 \
+ 4.9.0 \
+ 4.14.0 \
+
include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
include $(CLEAR_VARS)
LOCAL_MODULE := framework_compatibility_matrix.2.xml
LOCAL_MODULE_STEM := compatibility_matrix.2.xml
LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
-LOCAL_KERNEL_VERSIONS := 3.18.0 4.4.0 4.9.0
-include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
+LOCAL_KERNEL_VERSIONS := \
+ 3.18.0 \
+ 4.4.0 \
+ 4.9.0 \
+    4.14.0 \
+
-# TODO(b/72409164): STOPSHIP: update kernel version requirements
+include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
include $(CLEAR_VARS)
LOCAL_MODULE := framework_compatibility_matrix.3.xml
LOCAL_MODULE_STEM := compatibility_matrix.3.xml
LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
-LOCAL_KERNEL_VERSIONS := 4.4.0 4.9.0
+LOCAL_KERNEL_VERSIONS := \
+ 4.4.0 \
+ 4.9.0 \
+ 4.14.0 \
+
include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
# Framework Compatibility Matrix (common to all FCM versions)
diff --git a/current.txt b/current.txt
index f744797..7e4b102 100644
--- a/current.txt
+++ b/current.txt
@@ -241,11 +241,11 @@
86ba9c03978b79a742e990420bc5ced0673d25a939f82572996bef92621e2014 android.hardware.cas@1.0::IMediaCasService
503da837d1a67cbdb7c08a033e927e5430ae1b159d98bf72c6336b4dcc5e76f5 android.hardware.cas.native@1.0::types
619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler
-246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
93eb3757ceaf21590fa4cd1d4a7dfe3b3794af5396100a6d25630879352abce9 android.hardware.neuralnetworks@1.0::IDevice
f66f9a38541bf92001d3adcce678cd7e3da2262124befb460b1c9aea9492813b android.hardware.neuralnetworks@1.0::IExecutionCallback
953607822954435874f4b81686440a604e2a88cdd2d9164c6293f3d5772510d7 android.hardware.neuralnetworks@1.0::IPreparedModel
73e03573494ba96f0e711ab7f1956c5b2d54c3da690cd7ecf4d6d0f287447730 android.hardware.neuralnetworks@1.0::IPreparedModelCallback
+246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal
c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback
@@ -258,7 +258,9 @@
fb92e2b40f8e9d494e8fd3b4ac18499a3216342e7cff160714c3bbf3660b6e79 android.hardware.gnss@1.0::IGnssConfiguration
251594ea9b27447bfa005ebd806e58fb0ae4aad84a69938129c9800ec0c64eda android.hardware.gnss@1.0::IGnssMeasurementCallback
4e7169919d24fbe5573e5bcd683d0bd7abf553a4e6c34c41f9dfc1e12050db07 android.hardware.gnss@1.0::IGnssNavigationMessageCallback
-1488db5ffb8a7979488d1084761aab8bca2f59bc9a02d75cdefc296afeaf591b android.hardware.neuralnetworks@1.0::types
+5804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice
+12e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback
+702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types
d4840db8efabdf1e4b344fc981cd36e5fe81a39aff6e199f6d06c1c8da413efd android.hardware.radio@1.0::types
b280c4704dfcc548a9bf127b59b7c3578f460c50cce70a06b66fe0df8b27cff0 android.hardware.wifi@1.0::types
@@ -336,8 +338,8 @@
b8c7ed58aa8740361e63d0ce9e7c94227572a629f356958840b34809d2393a7c android.hardware.media.bufferpool@1.0::IClientManager
4a2c0dc82780e6c90731725a103feab8ab6ecf85a64e049b9cbd2b2c61620fe1 android.hardware.media.bufferpool@1.0::IConnection
6aef1218e5949f867b0104752ac536c1b707222a403341720de90141df129e3e android.hardware.media.bufferpool@1.0::types
-1529409ed76ae87facab152b770495e9e62544fcc5215daabf146c28d588bab9 android.hardware.neuralnetworks@1.1::IDevice
-e808a6f61cd7b47887c599d8843e67a2dcbf4ec5aadd5d22fdce93020070ef1b android.hardware.neuralnetworks@1.1::types
+7698dc2382a2eeb43541840e3ee624f34108efdfb976b2bfa7c13ef15fb8c4c4 android.hardware.neuralnetworks@1.1::IDevice
+5604001029a255648a9e955de0a822a48d9ba7cc259b106fb8be0cd43dc8eece android.hardware.neuralnetworks@1.1::types
8d3d86da0bfa4bf070970d8303c659f67f35d670c287d45a3f542e4fedadd578 android.hardware.nfc@1.1::INfc
e85f566698d2a2c28100e264fcf2c691a066756ddf8dd341d009ff50cfe10614 android.hardware.nfc@1.1::INfcClientCallback
5e278fcaa3287d397d8eebe1c22aaa28150f5caae1cf9381cd6dc32cb37899c5 android.hardware.nfc@1.1::types
diff --git a/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
index fbe5237..3a181a9 100644
--- a/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -2918,28 +2918,6 @@
}
/*
- * EncryptionOperationsTest.AesEcbWithUserId
- *
- * Verifies that AES ECB mode works when Tag::USER_ID is specified.
- */
-TEST_F(EncryptionOperationsTest, AesEcbWithUserId) {
- string key = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- ASSERT_EQ(ErrorCode::OK, ImportKey(AuthorizationSetBuilder()
- .Authorization(TAG_NO_AUTH_REQUIRED)
- .Authorization(TAG_USER_ID, 0)
- .AesEncryptionKey(key.size() * 8)
- .EcbMode()
- .Padding(PaddingMode::PKCS7),
- KeyFormat::RAW, key));
-
- string message = "Hello World!";
- auto params = AuthorizationSetBuilder().BlockMode(BlockMode::ECB).Padding(PaddingMode::PKCS7);
- string ciphertext = EncryptMessage(message, params);
- string plaintext = DecryptMessage(ciphertext, params);
- EXPECT_EQ(message, plaintext);
-}
-
-/*
* EncryptionOperationsTest.AesEcbRoundTripSuccess
*
* Verifies that AES encryption fails in the correct way when an unauthorized mode is specified.
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
index 49c2967..62fb2ba 100644
--- a/neuralnetworks/1.0/IDevice.hal
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -36,7 +36,7 @@
/**
* Gets the supported operations in a model.
*
- * getSupportedSubgraph indicates which operations of a model are fully
+ * getSupportedOperations indicates which operations of a model are fully
* supported by the vendor driver. If an operation may not be supported for
* any reason, getSupportedOperations must return false for that operation.
*
diff --git a/neuralnetworks/1.0/IExecutionCallback.hal b/neuralnetworks/1.0/IExecutionCallback.hal
index ef0f454..9c06166 100644
--- a/neuralnetworks/1.0/IExecutionCallback.hal
+++ b/neuralnetworks/1.0/IExecutionCallback.hal
@@ -28,7 +28,7 @@
* ErrorStatus resulting from the execution. If the asynchronous task
* is not launched, notify must be invoked with the appropriate error.
*
- * @return param Error status returned from launching the asynchronous task
+ * @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
* (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index a9c91cd..8c07fcc 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -24,38 +24,40 @@
* Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors
* with at least one dimension). Types not prefaced by TENSOR_* represent
* scalar values and must have no dimensions.
+ *
+ * Although many types are defined, most operators accept just a few
+ * types. Most used are {@link OperandType::TENSOR_FLOAT32},
+ * {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * and {@link OperandType::INT32}.
*/
enum OperandType : int32_t {
- /**
- * The following entries are used to declare scalars.
- */
+ /** A 32 bit floating point scalar value. */
FLOAT32 = 0,
+ /** A signed 32 bit integer scalar value. */
INT32 = 1,
+ /** An unsigned 32 bit integer scalar value. */
UINT32 = 2,
- /**
- * The following entries are used to declare tensors.
- */
+ /** A tensor of 32 bit floating point values. */
TENSOR_FLOAT32 = 3,
+ /** A tensor of 32 bit integer values. */
TENSOR_INT32 = 4,
-
- /**
- * A tensor of 8 bit integers that represent real numbers.
+ /** A tensor of 8 bit integers that represent real numbers.
*
* Attached to this tensor are two numbers that can be used to convert the
* 8 bit integer to the real value and vice versa. These two numbers are:
- * - scale: a 32 bit floating point value greater than zero
- * - zero_value: a 32 bit integer
+ * - scale: a 32 bit floating point value greater than zero.
+ * - zeroPoint: a 32 bit integer, in range [0, 255].
*
* The formula is:
- * real_value = (integer_value - zero_value) * scale.
+ * real_value = (integer_value - zeroPoint) * scale.
*/
TENSOR_QUANT8_ASYMM = 5,
- /**
- * The following entries are OEM specific operand types.
- */
+ /** OEM specific scalar value. */
OEM = 10000,
+
+ /** A tensor of OEM specific values. */
TENSOR_OEM_BYTE = 10001,
 };
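
As an illustrative aside (not part of this patch): a minimal C++ sketch of the asymmetric 8-bit scheme documented above for TENSOR_QUANT8_ASYMM. The helper names `quantize`/`dequantize` are hypothetical, not NNAPI entry points.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Dequantize per the doc: real_value = (integer_value - zeroPoint) * scale.
float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
    return (static_cast<int32_t>(q) - zeroPoint) * scale;
}

// Quantize: the inverse mapping, rounded and clamped to [0, 255].
uint8_t quantize(float real, float scale, int32_t zeroPoint) {
    int32_t q = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

int main() {
    const float scale = 0.5f;
    const int32_t zeroPoint = 128;
    uint8_t q = quantize(-3.0f, scale, zeroPoint);  // 122
    std::printf("%d -> %.1f\n", q, dequantize(q, scale, zeroPoint));  // 122 -> -3.0
}
```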
@@ -66,9 +68,9 @@
*/
enum OperationType : int32_t {
/**
- * Adds two tensors, elment-wise.
+ * Adds two tensors, element-wise.
*
- * Takes two input tensors of identical type and compatible dimensions. The output
+ * Takes two input tensors of identical type and compatible dimensions. The output
* is the sum of both input tensors, optionally modified by an activation function.
*
* Two dimensions are compatible when:
@@ -79,22 +81,25 @@
* It starts with the trailing dimensions, and works its way forward.
*
* Example:
- * input1.dimension = {4, 1, 2}
+ *
+ * input1.dimension = {4, 1, 2}
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: A tensor.
- * 1: A tensor of the same type, and compatible dimensions as input0.
- * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0: A tensor.
+ * * 1: A tensor of the same type, and compatible dimensions as input0.
+ * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
*
- * Ouputs:
- * 0: The sum, a tensor of the same type as input0.
+ * Outputs:
+ * * 0: The sum, a tensor of the same type as input0.
*/
ADD = 0,
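
For the broadcasting rule described above, a small illustrative C++ sketch (`broadcastShape` is a hypothetical helper) that reproduces the documented example:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Broadcast two shapes per the rule above: align trailing dimensions; two
// dimensions are compatible when they are equal, or one of them is 1.
// Returns an empty vector if the shapes are incompatible.
std::vector<int> broadcastShape(std::vector<int> a, std::vector<int> b) {
    std::vector<int> out(std::max(a.size(), b.size()));
    for (size_t i = 0; i < out.size(); ++i) {
        int da = i < a.size() ? a[a.size() - 1 - i] : 1;
        int db = i < b.size() ? b[b.size() - 1 - i] : 1;
        if (da != db && da != 1 && db != 1) return {};
        out[out.size() - 1 - i] = std::max(da, db);
    }
    return out;
}

int main() {
    auto out = broadcastShape({4, 1, 2}, {5, 4, 3, 1});
    for (int d : out) std::printf("%d ", d);  // prints: 5 4 3 2
}
```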
@@ -103,29 +108,50 @@
*
* The output dimensions are functions of the filter dimensions, stride, and padding.
*
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
* output[batch, row, col, channel] =
* sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 7: An INT32 value, specifying the filter width.
- * 8: An INT32 value, specifying the filter height.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels)
+ * data layout.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 6: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the filter width.
+ * * 8: An INT32 value, specifying the filter height.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
AVERAGE_POOL_2D = 1,
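
The doc above says only that the output dimensions are functions of filter, stride, and padding. As a hedged sketch, assuming the conventional pooling/convolution arithmetic (helper names hypothetical):

```cpp
#include <cstdio>

// Output size along one spatial dimension with explicit padding.
int outSizeExplicit(int in, int filter, int stride, int padBefore, int padAfter) {
    return (in - filter + padBefore + padAfter) / stride + 1;
}

// Output size for the implicit schemes listed above: 1 (SAME), 2 (VALID).
int outSizeImplicit(int in, int filter, int stride, int scheme) {
    if (scheme == 1) return (in + stride - 1) / stride;  // SAME: ceil(in / stride)
    return (in - filter + stride) / stride;              // VALID: ceil((in - filter + 1) / stride)
}

int main() {
    // 224-wide input, 2-wide filter, stride 2, no explicit padding -> 112.
    std::printf("%d\n", outSizeExplicit(224, 2, 2, 0, 0));  // prints: 112
    std::printf("%d\n", outSizeImplicit(224, 2, 2, 1));     // SAME -> 112
}
```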
@@ -135,19 +161,21 @@
* The input tensors must have identical type and the same dimensions except the
* dimension along the concatenation axis.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0 ~ n: The list on n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]
- * n+1: An INT32 value, specifying the concatenation axis.
- * n+2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm].
+ * For inputs of {@link OperandType::TENSOR_QUANT8_ASYMM} type, all
+ * input tensors must have the same scale and zeroPoint.
+ * * n: An INT32 value, specifying the concatenation axis.
*
- * Ouputs:
- * 0: The output, a tensor of the same type as the input tensors.
- The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ * Outputs:
+ * * 0: The output, a tensor of the same type as the input tensors.
+ * The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
*/
CONCATENATION = 2,
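
An illustrative sketch of the output-shape rule stated above, [D0, D1, ..., sum(Daxis(i)), ..., Dm] (`concatShape` is a hypothetical helper):

```cpp
#include <cstdio>
#include <vector>

// Concatenation output shape: identical to the inputs except along `axis`,
// where the sizes of all inputs add up.
std::vector<int> concatShape(const std::vector<std::vector<int>>& inputs, int axis) {
    std::vector<int> out = inputs[0];
    for (size_t i = 1; i < inputs.size(); ++i) out[axis] += inputs[i][axis];
    return out;
}

int main() {
    auto out = concatShape({{2, 3, 4}, {2, 5, 4}}, /*axis=*/1);
    for (int d : out) std::printf("%d ", d);  // prints: 2 8 4
}
```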
@@ -159,7 +187,8 @@
*
* The output dimensions are functions of the filter dimensions, stride, and padding.
*
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
* output[batch, row, col, channel] =
* sum_{i, j} (
* input[batch, row + i, col + j, k] *
@@ -167,77 +196,135 @@
* bias[channel]
* )
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
- * specifying the filter.
- * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
- * should be of {@link OperandType::TENSOR_INT32}.
- * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 8: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
*/
CONV_2D = 3,
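
A small validation sketch of the two quantization constraints stated above (bias_scale == input_scale * filter_scale, and output_scale > input_scale * filter_scale); `validConvQuantParams` is a hypothetical helper, not a driver API:

```cpp
#include <cassert>
#include <cmath>

// Check the quantized CONV_2D scale constraints from the doc comment above.
bool validConvQuantParams(float inputScale, float filterScale,
                          float biasScale, float outputScale) {
    const float product = inputScale * filterScale;
    return std::fabs(biasScale - product) < 1e-9f && outputScale > product;
}

int main() {
    assert(validConvQuantParams(0.5f, 0.25f, 0.125f, 0.2f));
    assert(!validConvQuantParams(0.5f, 0.25f, 0.125f, 0.1f));  // output_scale too small
}
```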
/**
- * Performs an depthwise 2-D convolution operation.
+ * Performs a depthwise 2-D convolution operation.
*
* Given an input tensor of shape [batches, height, width, depth_in] and a filter
- * tensor of shape [depth_out, filter_height, filter_width, depth_in] containing
- * in_channels convolutional filters of depth 1, DEPTHWISE_CONV applies a different
+ * tensor of shape [1, filter_height, filter_width, depth_out] containing
+ * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different
* filter to each input channel (expanding from 1 channel to channel_multiplier channels
* for each), then concatenates the results together.
*
* The output has depth_out = depth_in * depth_multiplier channels.
* The output dimensions are functions of the filter dimensions, stride, and padding.
*
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
* output[b, i, j, k * channel_multiplier + q] =
* sum_{di, dj} (
* input[b, strides[1] * i + di, strides[2] * j + dj, k] *
- * filter[di, dj, k, q]
+ * filter[1, di, dj, k * channel_multiplier + q]
* )
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
- * specifying the filter.
- * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
- * should be of {@link OperandType::TENSOR_INT32}.
- * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 9: An INT32 value, specifying the depthwise multiplier.
- * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 8: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 9: An INT32 value, specifying the depthwise multiplier.
+ * * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 6: An INT32 value, specifying the depthwise multiplier.
+ * * 7: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
*/
DEPTHWISE_CONV_2D = 4,
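
For concreteness, a float-only sketch of the per-element formula above for a single batch, with bounds checks and padding omitted; `depthwiseAt` and its flat NHWC indexing are illustrative assumptions, not the NNAPI reference implementation:

```cpp
#include <vector>

// One output element of DEPTHWISE_CONV_2D per the formula above, for a float
// NHWC input of shape [1, H, W, Cin] and a [1, fh, fw, Cin * mult] filter.
float depthwiseAt(const std::vector<float>& in, int H, int W, int Cin,
                  const std::vector<float>& filter, int fh, int fw, int mult,
                  const std::vector<float>& bias,
                  int row, int col, int k, int q, int strideH, int strideW) {
    float acc = bias[k * mult + q];
    for (int di = 0; di < fh; ++di)
        for (int dj = 0; dj < fw; ++dj)
            acc += in[((row * strideH + di) * W + (col * strideW + dj)) * Cin + k] *
                   filter[(di * fw + dj) * (Cin * mult) + k * mult + q];
    return acc;
}

int main() {
    std::vector<float> in = {1, 2, 3, 4};      // 1x2x2x1 NHWC
    std::vector<float> filter = {1, 0, 0, 1};  // 1x2x2x1, multiplier 1
    std::vector<float> bias = {0.f};
    // Single 2x2 window at (0,0): 1*1 + 4*1 = 5.
    float y = depthwiseAt(in, 2, 2, 1, filter, 2, 2, 1, bias, 0, 0, 0, 0, 1, 1);
    (void)y;
}
```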
@@ -255,18 +342,20 @@
* input_height * block_size.
* The depth of the input tensor must be divisible by block_size * block_size
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
- * block_size * block_size must be a divisor of the input depth.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+ * block_size * block_size must be a divisor of the input depth.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
- * depth/(block_size*block_size)].
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
+ * depth/(block_size*block_size)].
*/
DEPTH_TO_SPACE = 5,
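
A sketch of DEPTH_TO_SPACE as a pure index remap (batch omitted), assuming the TensorFlow-style ordering of the unpacked channels, which the text above does not spell out:

```cpp
#include <cstdio>
#include <vector>

// Each input pixel's depth is unpacked into a block_size x block_size
// spatial block; illustration only.
std::vector<float> depthToSpace(const std::vector<float>& in,
                                int H, int W, int C, int bs) {
    int Cout = C / (bs * bs);
    std::vector<float> out(H * bs * W * bs * Cout);
    for (int i = 0; i < H; ++i)
        for (int j = 0; j < W; ++j)
            for (int di = 0; di < bs; ++di)
                for (int dj = 0; dj < bs; ++dj)
                    for (int k = 0; k < Cout; ++k)
                        out[((i * bs + di) * (W * bs) + (j * bs + dj)) * Cout + k] =
                            in[(i * W + j) * C + (di * bs + dj) * Cout + k];
    return out;
}

int main() {
    // 1x1 input with depth 4, block_size 2 -> 2x2 output with depth 1.
    auto out = depthToSpace({1, 2, 3, 4}, 1, 1, 4, 2);
    for (float v : out) std::printf("%.0f ", v);  // prints: 1 2 3 4
}
```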
@@ -274,53 +363,69 @@
* Dequantizes the input tensor.
*
* The formula is:
- * output = (input - zero_value) * scale.
*
- * Supported tensor types: {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * output = (input - zeroPoint) * scale.
+ *
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ * * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0, but with type
- {@link OperandType::TENSOR_FLOAT32}.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0, but with type
+ * {@link OperandType::TENSOR_FLOAT32}.
*/
DEQUANTIZE = 6,
/**
- * Looks up items from a given tensor.
+ * Looks up sub-tensors in the input tensor.
*
- * Each item in the output is a raw copy of the corresponding item in
- * the input “values”. If the the given “lookup” indices are out of bounds,
- * the op will fail and an error will be reported.
+ * This operator takes for input a tensor of values (Values) and
+ * a one-dimensional tensor of selection indices (Lookups).
+ * The output tensor is the concatenation of sub-tensors of Values as
+ * selected by Lookups.
+ *
+ * Think of Values as being sliced along its first dimension:
+ * The entries in Lookups select which slices are concatenated together
+ * to create the output tensor.
+ *
+ * For example, if Values has shape of [40, 200, 300] and
+ * Lookups has shape of [3], all three values found in Lookups are
+ * expected to be between 0 and 39. The resulting tensor must
+ * have shape of [3, 200, 300].
+ *
+ * If a value in Lookups is out of bounds, the operation must fail
+ * and an error must be reported.
*
* Inputs:
- * * 0: Values. An n-D tensor of any type X (where n >= 2). E.g., if n is 2,
- * then the shape would be [lookup_dimension, values_dimension], where
- * “lookup_dimension” corresponds to the indexing dimension in the lookup
- * table, and “values_dimension” to the contents.
- * * 1: Lookups. An 1-D tensor of type T, of shape [lookup_size], where
- * “lookup_size” is the number of elements to look for, and each entry
- * corresponds to the first dimension of the “values” tensor.
+ * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32} type.
+ * The values are indices into the first dimension of Values.
+ * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+ * extracted.
*
* Output:
- * * 0: A n-D tensor of type X and the same rank and shape as the “values”
- * tensor, except for the first dimension which has size “lookup_size”.
+ * * 0: A n-D tensor with the same rank and shape as the Values
+ * tensor, except for the first dimension which has the same size
+ * as Lookups' only dimension.
*/
EMBEDDING_LOOKUP = 7,
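
The slice-and-concatenate semantics above, as an illustrative C++ sketch for float data (`embeddingLookup` and `rowSize` are hypothetical names; a slice here is one row along the first dimension of Values):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Concatenate the slices of Values (sliced along its first dimension)
// selected by the Lookups indices. `rowSize` is the element count per slice.
std::vector<float> embeddingLookup(const std::vector<int32_t>& lookups,
                                   const std::vector<float>& values,
                                   size_t rowSize) {
    std::vector<float> out;
    out.reserve(lookups.size() * rowSize);
    for (int32_t idx : lookups)  // a real driver must fail on out-of-bounds idx
        out.insert(out.end(), values.begin() + idx * rowSize,
                   values.begin() + (idx + 1) * rowSize);
    return out;
}

int main() {
    // Values of shape [3, 2]; looking up rows 2 and 0.
    auto out = embeddingLookup({2, 0}, {1, 2, 3, 4, 5, 6}, 2);
    for (float v : out) std::printf("%.0f ", v);  // prints: 5 6 1 2
}
```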
/**
* Computes element-wise floor() on the input tensor.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: A tensor.
+ * * 0: A tensor.
*
- * Ouputs:
- * 0: The output, a tensor of the same type and dimensions as input0.
+ * Outputs:
+ * * 0: The output tensor, of the same type and dimensions as the input tensor.
*/
FLOOR = 8,
@@ -329,66 +434,104 @@
* tensor with each element in the output tensor.
*
* This layer implements the operation:
+ *
* outputs = activation(inputs * weights’ + bias)
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
- * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
- * [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
- * and “input_size” is the size of the input.
- * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where “num_units”
- * corresponds to the number of output nodes.
- * 2: A 1-D tensor, of shape [num_units], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
- * should be of {@link OperandType::TENSOR_INT32}.
- * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
+ * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
+ * [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
+ * and “input_size” is the size of the input.
+ * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
+ * "num_units" corresponds to the number of output nodes.
+ * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
*
- * Ouputs:
- * 0: The output tensor, of shape [batch_size, num_units].
+ * Outputs:
+ * * 0: The output tensor, of shape [batch_size, num_units].
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
*/
FULLY_CONNECTED = 9,
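
The flattening rule above leaves batch_size implicit. A sketch assuming the usual convention that input_size comes from the weights' second dimension and batch_size is the remaining element count (`batchSizeFor` is a hypothetical helper):

```cpp
#include <cstdio>

// Reinterpret a rank > 2 input as [batch_size, input_size]; assumes
// totalElements is divisible by inputSize.
int batchSizeFor(int totalElements, int inputSize) {
    return totalElements / inputSize;
}

int main() {
    // A [2, 4, 8] input with weights of shape [num_units, 32] is handled
    // as a [2, 32] matrix: 64 elements / input_size 32.
    std::printf("%d\n", batchSizeFor(2 * 4 * 8, 32));  // prints: 2
}
```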
/**
- * Looks up values of a hash table with given keys.
+ * Looks up sub-tensors in the input tensor using a key-value map.
+ *
+ * This operator takes for input a tensor of values (Values),
+ * a one-dimensional tensor of selection values (Lookups) and
+ * a one-dimensional tensor that maps these values to Values
+ * indexes. The output tensor is the concatenation of sub-tensors of
+ * Values as selected by Lookups via Keys.
+ *
+ * Think of Values as being sliced along its outer-most dimension.
+ * The output is a concatenation of selected slices, with one slice
+ * for each entry of Lookups. The slice selected is the one at the
+ * same index as the Keys entry that matches the value in Lookups.
+ *
+ * For a hit, the corresponding sub-tensor of Values is included
+ * in the Output tensor. For a miss, the corresponding sub-tensor in
+ * Output must have zero values.
+ *
+ * For example, if Values has shape of [40, 200, 300],
+ * Keys should have a shape of [40]. If Lookups tensor has shape
+ * of [3], three slices are being concatenated, so the resulting tensor
+ * must have the shape of [3, 200, 300]. If the first entry in Lookups
+ * has the value 123456, that value must be located in Keys tensor.
+ * If the sixth entry of Keys contains 123456, the sixth slice of Values
+ * must be selected. If no entry in Keys has 123456, a slice of zeroes
+ * must be concatenated.
*
* Inputs:
- * * 0: Lookups. A 1-D int32 tensor with shape [ k ].
- * * 1: Keys. A 1-D int32 tensor with shape [ n ], *MUST* be sorted in
- * ascending order.
- * * 2: Values. A tensor with shape [ n … ].
+ * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ k ].
+ * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ n ];
+ * Keys and Values pair represent a map, i.e., the ith element
+ * in Keys (Keys[i]) is the key to select the ith sub-tensor
+ * in Values (Values[i]), where 0 <= i <= n-1.
+ * Keys tensor *MUST* be sorted in ascending order.
+ * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n.
*
* Outputs:
* * 0: Output. A tensor with shape [ k …].
- * * 1: Hits. A uint8 tensor with shape [ k ] indicates whether the lookup
- * hits or not.
+ * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
+ * hits (True) or not (False).
+ * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f.
+ * A non-zero byte represents True, a hit. A zero indicates otherwise.
*/
HASHTABLE_LOOKUP = 10,
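
A hedged sketch of the hit/miss behavior described above for float Values (`hashtableLookup` is a hypothetical helper; it exploits the requirement that Keys is sorted ascending):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// For each entry of Lookups, find the matching entry in sorted Keys; on a
// hit copy the corresponding slice of Values, on a miss emit zeroes and
// record 0 in Hits.
void hashtableLookup(const std::vector<int32_t>& lookups,
                     const std::vector<int32_t>& keys,
                     const std::vector<float>& values, size_t rowSize,
                     std::vector<float>* output, std::vector<uint8_t>* hits) {
    for (int32_t want : lookups) {
        auto it = std::lower_bound(keys.begin(), keys.end(), want);
        bool hit = it != keys.end() && *it == want;
        hits->push_back(hit ? 1 : 0);
        size_t row = hit ? static_cast<size_t>(it - keys.begin()) : 0;
        for (size_t c = 0; c < rowSize; ++c)
            output->push_back(hit ? values[row * rowSize + c] : 0.0f);
    }
}

int main() {
    std::vector<float> out;
    std::vector<uint8_t> hits;
    // Keys [10, 20] map to rows of a [2, 2] Values; lookup 20 hits, 15 misses.
    hashtableLookup({20, 15}, {10, 20}, {1, 2, 3, 4}, 2, &out, &hits);
    for (float v : out) std::printf("%.0f ", v);  // prints: 3 4 0 0
}
```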
/**
- * Applies L2 normalization along a the depth dimension.
+ * Applies L2 normalization along the depth dimension.
*
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
* output[batch, row, col, channel] =
* input[batch, row, col, channel] /
* sqrt(sum_{c} pow(input[batch, row, col, c], 2))
*
- * For x with more dimensions, independently normalizes each 1-D slice along dimension dim.
+ * For an input tensor with more dimensions, independently normalizes each 1-D slice along dimension dim.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width, and Channels).
*
* Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth].
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
L2_NORMALIZATION = 11,
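
An illustrative float-only sketch of the formula above for one [batch, row, col] position (`l2NormalizeDepth` is a hypothetical helper):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Divide each channel value by the L2 norm across all channels at the
// same spatial position, matching the formula above.
void l2NormalizeDepth(float* pixel, int depth) {
    float sumSq = 0.f;
    for (int c = 0; c < depth; ++c) sumSq += pixel[c] * pixel[c];
    const float norm = std::sqrt(sumSq);
    for (int c = 0; c < depth; ++c) pixel[c] /= norm;
}

int main() {
    std::vector<float> px = {3.f, 4.f};        // norm = 5
    l2NormalizeDepth(px.data(), 2);
    std::printf("%.1f %.1f\n", px[0], px[1]);  // prints: 0.6 0.8
}
```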
@@ -397,28 +540,48 @@
*
* The output dimensions are functions of the filter dimensions, stride, and padding.
*
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
* output[batch, row, col, channel] =
* sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 7: An INT32 value, specifying the filter width.
- * 8: An INT32 value, specifying the filter height.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 6: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the filter width.
+ * * 8: An INT32 value, specifying the filter height.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
L2_POOL_2D = 12,
@@ -429,41 +592,49 @@
* dimension), and each vector is normalized independently. Within a given vector,
* each component is divided by the weighted, squared sum of inputs within depth_radius.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* sqr_sum[a, b, c, d] =
* sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)
* output = input / pow((bias + alpha * sqr_sum), beta)
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the radius of the normalization window.
- * 2: A FLOAT32 value, specifying the bias, must not be zero.
- * 3: A FLOAT32 value, specifying the scale factor, alpha.
- * 4: A FLOAT32 value, specifying the exponent, beta.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the radius of the normalization window.
+ * * 2: A FLOAT32 value, specifying the bias, must not be zero.
+ * * 3: A FLOAT32 value, specifying the scale factor, alpha.
+ * * 4: A FLOAT32 value, specifying the exponent, beta.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
*/
LOCAL_RESPONSE_NORMALIZATION = 13,
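
The same formula as an illustrative sketch applied to one 1-D slice along depth; `lrn` is a hypothetical helper, and clamping the window at the tensor edges is an assumption the doc text does not spell out:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// output = input / pow(bias + alpha * sqr_sum, beta), with sqr_sum taken
// over a window of +/- radius around each channel.
std::vector<float> lrn(const std::vector<float>& in, int radius,
                       float bias, float alpha, float beta) {
    std::vector<float> out(in.size());
    const int n = static_cast<int>(in.size());
    for (int d = 0; d < n; ++d) {
        float sqrSum = 0.f;
        for (int i = std::max(0, d - radius); i <= std::min(n - 1, d + radius); ++i)
            sqrSum += in[i] * in[i];
        out[d] = in[d] / std::pow(bias + alpha * sqrSum, beta);
    }
    return out;
}

int main() {
    // First channel: 1 / sqrt(1 + (1 + 4)) = 0.408.
    std::printf("%.3f\n", lrn({1.f, 2.f, 3.f}, 1, 1.f, 1.f, 0.5f)[0]);
}
```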
/**
* Computes sigmoid activation on the input tensor element-wise.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* output = 1 / (1 + exp(-input))
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM} type,
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
*/
LOGISTIC = 14,
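
With the quantized output parameters required above (scale = 1/256, zeroPoint = 0), sigmoid's (0, 1) range maps onto the full uint8 range. A reference sketch in float (`quantizedLogistic` is a hypothetical helper):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Compute sigmoid in float, then quantize with scale 1/256 and zeroPoint 0.
uint8_t quantizedLogistic(float x) {
    float y = 1.f / (1.f + std::exp(-x));             // in (0, 1)
    int q = static_cast<int>(std::round(y * 256.f));  // value / scale + zeroPoint
    return static_cast<uint8_t>(q > 255 ? 255 : q);
}

int main() {
    std::printf("%d\n", quantizedLogistic(0.f));  // sigmoid(0) = 0.5 -> prints: 128
}
```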
@@ -502,102 +673,165 @@
LSH_PROJECTION = 15,
/**
- * Long short-term memory unit (LSTM) recurrent network layer.
+ * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
*
- * The default non-peephole implementation is based on:
- * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
+ * The LSTM operation is described by the following equations.
+ *
+ * \f{eqnarray*}{
+ * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
+ * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
+ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell})& \\
+ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o)& \\
+ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) & if\ there\ is\ a\ projection; \\
+ * h_t =& & \\
+ * & o_t \odot g(C_t) & otherwise. \\
+ * \f}
+ * Where:
+ * * \f$x_t\f$ is the input,
+ * * \f$i_t\f$ is the input gate,
+ * * \f$f_t\f$ is the forget gate,
+ * * \f$C_t\f$ is the cell state,
+ * * \f$o_t\f$ is the output,
+ * * \f$h_t\f$ is the output state,
+ * * \f$\sigma\f$ is the logistic sigmoid function,
+ * * \f$g\f$ is the cell input and cell output activation function, usually \f$tanh\f$,
+ * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+ * * \f$W_{hi}\f$ is the recurrent-to-input weight matrix,
+ * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+ * * \f$b_i\f$ is the input gate bias,
+ * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+ * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+ * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+ * * \f$b_f\f$ is the forget gate bias,
+ * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+ * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+ * * \f$b_c\f$ is the cell bias,
+ * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+ * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+ * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+ * * \f$b_o\f$ is the output gate bias,
+ * * \f$W_{proj}\f$ is the projection weight matrix,
+ * * \f$b_{proj}\f$ is the projection bias,
+ * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+ * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+ * * \f$\odot\f$ is the <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+ * Hadamard product</a> that takes two matrices and produces another
+ * matrix, each element of which is the product of the corresponding
+ * elements of the input matrices.
+ *
+ * The operation has the following independently optional inputs:
+ * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights (\f$W_{hi}\f$),
+ * cell-to-input weights (\f$W_{ci}\f$), and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values (i.e., all set to null). If they have no
+ * values, coupling of input and forget gates (CIFG) is used, in which case
+ * the input gate (\f$i_t\f$) is calculated using the following equation instead.
+ * \f{eqnarray*}{
+ * i_t = 1 - f_t
+ * \f}
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights (\f$W_{cf}\f$), and cell-to-output
+ * weights (\f$W_{co}\f$) either all have values or none of them have values.
+ * If they have values, the peephole optimization is used.
+ * * The projection weights (\f$W_{proj}\f$) are required only for the recurrent projection
+ * layer, and should otherwise have no value.
+ * * The projection bias (\f$b_{proj}\f$) may (but need not) have a value if the
+ * recurrent projection layer exists, and should otherwise have no value.
+ *
+ * References:
+ *
+ * The default non-peephole non-CIFG implementation is based on:
+ * http://www.bioinf.jku.at/publications/older/2604.pdf
* S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
* Computation, 9(8):1735-1780, 1997.
*
- * The peephole implementation is based on:
+ * The peephole implementation and projection layer is based on:
* https://research.google.com/pubs/archive/43905.pdf
* Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
* recurrent neural network architectures for large scale acoustic modeling."
* INTERSPEECH, 2014.
+ * (However, the concept of peephole optimization was introduced in work
+ * prior to this paper.)
*
* The coupling of input and forget gate (CIFG) is based on:
* http://arxiv.org/pdf/1503.04069.pdf
* Greff et al. "LSTM: A Search Space Odyssey"
*
- * The class has the following independently optional inputs:
- * * If input gate (if CIFG): “input_to_forget_weights”,
- * “recurrent_to_input_weights”, “cell_to_input_weights”, “input_gate_bias”.
- * * If no peephole connections: “cell_to_input_weights”,
- * “cell_to_forget_weights”, “cell_to_output_weights”.
- * * If no projection layer: “projection_weights” and “projection_bias”.
- * * If no projection bias: “projection_bias”.
- *
- * Supported tensor types:
+ * Supported tensor types (type T):
* * {@link OperandType::TENSOR_FLOAT32}
*
* Inputs:
- * * 0: Input.
+ * * 0: The input (\f$x_t\f$).
* A 2-D tensor of type T, of shape [batch_size, input_size], where
* “batch_size” corresponds to the batching dimension, and “input_size”
* is the size of the input.
- * * 1: input_to_input_weights.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
* A 2-D tensor of type T, of shape [num_units, input_size], where
* “num_units” corresponds to the number of cell units.
- * * 2: input_to_forget_weights.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
* A 2-D tensor of type T, of shape [num_units, input_size].
- * * 3: input_to_cell_weights.
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
* A 2-D tensor of type T, of shape [num_units, input_size].
- * * 4: input_to_output_weights.
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
* A 2-D tensor of type T, of shape [num_units, input_size].
- * * 5: recurrent_to_input_weights.
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
* A 2-D tensor of type T, of shape [num_units, output_size], where
* “output_size” corresponds to either the number of cell units (i.e.,
* “num_units”), or the second dimension of the “projection_weights”, if
* defined.
- * * 6: recurrent_to_forget_weights.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
* A 2-D tensor of type T, of shape [num_units, output_size].
- * * 7: recurrent_to_cell_weights.
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
* A 2-D tensor of type T, of shape [num_units, output_size].
- * * 8: recurrent_to_output_weights.
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
* A 2-D tensor of type T, of shape [num_units, output_size].
- * * 9: cell_to_input_weights.
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
* A 1-D tensor of type T, of shape [num_units].
- * * 10:cell_to_forget_weights.
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
* A 1-D tensor of type T, of shape [num_units].
- * * 11:cell_to_output_weights.
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
* A 1-D tensor of type T, of shape [num_units].
- * * 12:input_gate_bias.
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
* A 1-D tensor of type T, of shape [num_units].
- * * 13:forget_gate_bias.
+ * * 13:The forget gate bias (\f$b_f\f$).
* A 1-D tensor of type T, of shape [num_units].
- * * 14:cell_bias.
+ * * 14:The cell bias (\f$b_c\f$).
* A 1-D tensor of type T, of shape [num_units].
- * * 15:output_gate_bias.
+ * * 15:The output gate bias (\f$b_o\f$).
* A 1-D tensor of type T, of shape [num_units].
- * * 16:projection_weights.
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
* A 2-D tensor of type T, of shape [output_size, num_units].
- * * 17:projection_bias.
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
* A 1-D tensor of type T, of shape [output_size].
- *
- * Parameters:
- * * 18:fused_activation_function.
- * An (optional) ActivationFunctionType indicating the activation
- * function.
- * If “NONE” is specified then it results in a linear activation.
- * * 19:cell_clip.
- * A clipping threshold for the cell state, such that values are bound
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of type T, of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of type T, of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such that values are bound
* within [-cell_clip, cell_clip]. If set to 0.0 then clipping is
* disabled.
- * * 20:proj_clip.
- * A clipping threshold for the output from the projection layer, such
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the projection layer, such
* that values are bound within [-proj_clip, proj_clip]. If set to 0.0
* then clipping is disabled.
*
* Outputs:
- * * 0: scratch_buffer.
- * A 3-D tensor of type T, of shape [batch_size, num_cell, 4].
- * * 1: output_state.
+ * * 0: The scratch buffer.
+ * A 2-D tensor of type T, of shape [batch_size, num_units * 3] with
+ * CIFG, or [batch_size, num_units * 4] without CIFG.
+ * * 1: The output state (out) (\f$h_t\f$).
* A 2-D tensor of type T, of shape [batch_size, output_size].
- * * 2: cell_state.
+ * * 2: The cell state (out) (\f$C_t\f$).
* A 2-D tensor of type T, of shape [batch_size, num_units].
- * * 3: output.
+ * * 3: The output (\f$o_t\f$).
* A 2-D tensor of type T, of shape [batch_size, output_size]. This is
- * effectively the same as the current “output_state” value.
+ * effectively the same as the current “output state (out)” value.
*/
LSTM = 16,
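
A minimal C++ sketch of the scratch-buffer sizing described above, using
hypothetical sizes (illustrative only, not part of the HAL itself):

    #include <cstdint>

    // CIFG couples the input and forget gates, so one fewer gate buffer is
    // needed: 3 gate buffers with CIFG, 4 without.
    constexpr uint32_t kNumUnits = 8;   // hypothetical
    constexpr uint32_t kBatchSize = 2;  // hypothetical
    constexpr bool kUseCifg = false;
    constexpr uint32_t kGateBuffers = kUseCifg ? 3 : 4;
    // Scratch buffer shape: [batch_size, num_units * kGateBuffers] -> {2, 32}.
    constexpr uint32_t kScratchShape[2] = {kBatchSize, kNumUnits * kGateBuffers};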
@@ -606,36 +840,56 @@
*
* The output dimensions are functions of the filter dimensions, stride, and padding.
*
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
* output[batch, row, col, channel] =
* max_{i, j} (input[batch, row + i, col + j, channel])
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 7: An INT32 value, specifying the filter width.
- * 8: An INT32 value, specifying the filter height.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 6: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the filter width.
+ * * 8: An INT32 value, specifying the filter height.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
*/
MAX_POOL_2D = 17,
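
The comment above states that the output dimensions follow from the filter
size, stride, and padding without spelling out the arithmetic. A sketch of the
standard pooling output-size formula (an assumption, not quoted from the HAL):

    #include <cstdint>

    // Output extent of one spatial dimension for explicit padding.
    uint32_t poolOutDim(uint32_t in, uint32_t padLo, uint32_t padHi,
                        uint32_t filter, uint32_t stride) {
        return (in + padLo + padHi - filter) / stride + 1;
    }
    // E.g. in = 32, padLo = padHi = 0, filter = 2, stride = 2 -> 16.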
/**
- * Multiplies two tensors, elment-wise.
+ * Multiplies two tensors, element-wise.
*
- * Takes two input tensors of identical type and compatible dimensions. The output
+ * Takes two input tensors of identical type and compatible dimensions. The output
* is the product of both input tensors, optionally modified by an activation function.
*
* Two dimensions are compatible when:
@@ -645,72 +899,85 @@
* The size of the resulting output is the maximum size along each dimension of the
* input operands. It starts with the trailing dimensions, and works its way forward.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: A tensor.
- * 1: A tensor of the same type, and compatible dimensions as input0.
- * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0: A tensor.
+ * * 1: A tensor of the same type, and compatible dimensions as input0.
+ * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
*
- * Ouputs:
- * 0: The product, a tensor of the same type as input0.
+ * Outputs:
+ * * 0: The product, a tensor of the same type as input0.
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+ * condition must be satisfied: output_scale > input1_scale * input2_scale.
*/
MUL = 18,
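
A one-line check of the quantized-output constraint documented above
(illustrative only):

    // Valid only when the output scale exceeds the product of input scales.
    bool mulQuantParamsValid(float in1Scale, float in2Scale, float outScale) {
        return outScale > in1Scale * in2Scale;
    }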
/**
* Computes rectified linear activation on the input tensor element-wise.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* output = max(0, input)
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
*/
RELU = 19,
/**
* Computes rectified linear 1 activation on the input tensor element-wise.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* output = min(1.f, max(-1.f, input))
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
*/
RELU1 = 20,
/**
* Computes rectified linear 6 activation on the input tensor element-wise.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* output = min(6, max(0, input))
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
*/
RELU6 = 21,
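
The three activation formulas above, written out as plain C++ for reference
(a sketch, not the driver implementation):

    #include <algorithm>

    float relu(float x)  { return std::max(0.f, x); }
    float relu1(float x) { return std::min(1.f, std::max(-1.f, x)); }
    float relu6(float x) { return std::min(6.f, std::max(0.f, x)); }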
@@ -720,36 +987,41 @@
 * Given a tensor, this operation returns a tensor that has the same values as the
 * input tensor, but with a newly specified shape.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the tensor to be reshaped.
- * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
- * of the output tensor. The number of elements implied by shape must be the same
- * as the number of elements in the input tensor.
+ * * 0: A tensor, specifying the tensor to be reshaped.
+ * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
+ * of the output tensor. The number of elements implied by shape must be the same
+ * as the number of elements in the input tensor.
*
- * Ouputs:
- * 0: The output tensor, of shape specified by the input shape.
+ * Outputs:
+ * * 0: The output tensor, of shape specified by the input shape.
*/
RESHAPE = 22,
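
The element-count invariant stated above, as a small C++ helper (illustrative
only):

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    // A reshape is valid only if the requested shape implies exactly as many
    // elements as the input holds.
    bool reshapeValid(const std::vector<size_t>& in,
                      const std::vector<size_t>& out) {
        auto count = [](const std::vector<size_t>& dims) {
            return std::accumulate(dims.begin(), dims.end(), size_t{1},
                                   std::multiplies<size_t>());
        };
        return count(in) == count(out);
    }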
/**
 * Resizes images to a given size using bilinear interpolation.
*
- * Resized images will be distorted if their original aspect ratio is not the same as input.
+ * Resized images will be distorted if the output aspect ratio is not the same as
+ * the input aspect ratio.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the output width of the output tensor.
- * 2: An INT32 value, specifying the output height of the output tensor.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the output height of the output tensor.
+ * * 2: An INT32 value, specifying the output width of the output tensor.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
*/
RESIZE_BILINEAR = 23,
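
Note that this change also swaps inputs 1 and 2 to height-then-width order; a
tiny sketch of the resulting output shape (hypothetical struct, illustrative
only):

    #include <cstdint>

    struct Shape4 { uint32_t batches, height, width, depth; };

    // Input 1 is the output height, input 2 the output width.
    Shape4 resizeBilinearOutShape(Shape4 in, uint32_t outH, uint32_t outW) {
        return {in.batches, outH, outW, in.depth};
    }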
@@ -768,7 +1040,7 @@
* * “activation” is the function passed as the “fused_activation_function”
* argument (if not “NONE”).
*
- * Supported tensor types:
+ * Supported tensor types (type T):
* * {@link OperandType::TENSOR_FLOAT32}
*
* Inputs:
@@ -784,21 +1056,18 @@
* corresponding to the weights from each unit.
* * 3: bias.
* A 1-D tensor of type T, of shape [num_units].
- *
- * For FLOAT32 input tensor, bias must also be FLOAT32.
- * For UINT8 input tensor, bias must be INT32.
- *
- * Parameters
- * * 4: fused_activation_function.
- * An (optional) ActivationFunctionType indicating the activation
+ * * 4: hidden state (in).
+ * A 2-D tensor of type T, of shape [batch_size, num_units].
+ * * 5: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the activation
* function. If “NONE” is specified then it results in a linear
* activation.
*
- * * 5: Hidden state.
+ * Outputs:
+ * * 0: hidden state (out).
* A 2-D tensor of type T, of shape [batch_size, num_units].
*
- * Outputs:
- * * 0: output.
+ * * 1: output.
* A 2-D tensor of type T, of shape [batch_size, num_units]. This is
* effectively the same as the current state value.
*/
@@ -808,21 +1077,26 @@
* Computes the softmax activation on the input tensor element-wise, per batch, by
* normalizing the input vector so the maximum coefficient is zero.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* output[batch, i] =
* exp((input[batch, i] - max(input[batch, :])) * beta) /
* sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: 2 or 4.
*
* Inputs:
- * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
- * 1: A FLOAT32 value, specifying the scaling factor for the exponent, beta.
+ * * 0: A 2-D or 4-D tensor, specifying the input.
+ * * 1: A FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM} type,
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
*/
SOFTMAX = 25,
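
A direct C++ transcription of the per-batch formula above for a single batch
row (a sketch, not the driver implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> softmaxRow(const std::vector<float>& in, float beta) {
        const float maxV = *std::max_element(in.begin(), in.end());
        std::vector<float> out(in.size());
        float sum = 0.f;
        for (size_t i = 0; i < in.size(); ++i) {
            out[i] = std::exp((in[i] - maxV) * beta);  // shift by max, scale by beta
            sum += out[i];
        }
        for (float& v : out) v /= sum;                 // normalize
        return out;
    }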
@@ -839,18 +1113,20 @@
* The depth of the output tensor is input_depth * block_size * block_size.
* The input tensor's height and width must be divisible by block_size.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
- * block_size must be a divisor of both the input height and width.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+ * block_size must be a divisor of both the input height and width.
*
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
- * depth*block_size*block_size].
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
+ * depth*block_size*block_size].
*/
SPACE_TO_DEPTH = 26,
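
The shape arithmetic from the Outputs section above, as a helper (hypothetical
struct, illustrative only; block_size must divide both height and width):

    #include <cstdint>

    struct Nhwc { uint32_t batches, height, width, depth; };

    Nhwc spaceToDepthOutShape(Nhwc in, uint32_t block) {
        return {in.batches, in.height / block, in.width / block,
                in.depth * block * block};
    }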
@@ -874,8 +1150,8 @@
*
* Specifically, for rank 1, this layer implements the operation:
*
- * memory = push(conv1d(inputs, weights_feature, feature_dim, "VALID"));
- * outputs = activation(memory * weights_time + bias);
+ * memory = push(conv1d(inputs, weights_feature, feature_dim, "PADDING_VALID"));
+ * outputs = activation(memory * weights_time + bias);
*
* Where:
* * “weights_feature” is a weights matrix that processes the inputs (by
@@ -892,7 +1168,7 @@
* Each rank adds a dimension to the weights matrices by means of stacking
* the filters.
*
- * Supported tensor types:
+ * Supported tensor types (type T):
* * {@link OperandType::TENSOR_FLOAT32}
*
* Inputs:
@@ -907,20 +1183,17 @@
* A 2-D tensor of type T, of shape [num_units, memory_size], where
* “memory_size” corresponds to the fixed-size of the memory.
* * 3: bias.
- * A optional 1-D tensor of type T, of shape [num_units].
- *
- * For FLOAT32 input tensor, bias must also be FLOAT32.
- * For UINT8 input tensor, bias must be INT32.
- *
- * Parameters:
- * * 4: rank.
+ * An optional 1-D tensor of type T, of shape [num_units].
+ * * 4: state (in).
+ * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
+ * * 5: rank.
* The rank of the SVD approximation.
- * * 5: fused_activation_function.
- * An (optional) ActivationFunctionType indicating the activation function.
+ * * 6: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the activation function.
* If “NONE” is specified then it results in a linear activation.
*
* Outputs:
- * * 0: state.
+ * * 0: state (out).
* A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
* * 1: output.
* A 2-D tensor of type T, of shape [batch_size, num_units].
@@ -930,17 +1203,20 @@
/**
* Computes hyperbolic tangent of input tensor element-wise.
*
- * In details:
+ * The output is calculated using this formula:
+ *
* output = tanh(input)
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: up to 4.
*
* Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
*
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
*/
TANH = 28,
@@ -967,8 +1243,8 @@
*/
enum OperandLifeTime : int32_t {
/**
- * The operand is internal to the model. It's created by an operation
- * and consumed by other operations.
+ * The operand is internal to the model. It's created by an operation and
+ * consumed by other operations.
*/
TEMPORARY_VARIABLE,
@@ -1081,7 +1357,11 @@
vec<uint32_t> dimensions;
/**
- * The number of operations that use this operand as input.
+ * The number of times this operand appears as an operation input.
+ *
+ * (For example, if this operand appears once in one operation's
+ * input list, and three times in another operation's input list,
+ * then numberOfConsumers = 4.)
*/
uint32_t numberOfConsumers;
@@ -1108,7 +1388,7 @@
/**
* Where to find the data for this operand.
* If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or NO_VALUE:
- * - All the fields will be 0.
+ * - All the fields must be 0.
* If the lifetime is CONSTANT_COPY:
* - location.poolIndex is 0.
* - location.offset is the offset in bytes into Model.operandValues.
@@ -1216,7 +1496,7 @@
* Updated dimension information.
*
* If dimensions.size() > 0, dimension information was provided along with the
- * argument. This can be the case for models that accept inputs of varying size.
+ * argument. This can be the case for models that accept inputs of varying size.
* This can't change the rank, just the value of the dimensions that were
* unspecified in the model.
*/
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 2ac6130..570a4fb 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -30,10 +30,6 @@
* "notify". This "notify" call awakens any client threads waiting on the
* callback object.
*
- * callback object. When the asynchronous task has finished its workload or has
- * failed to launch, it must immediately call "notify", awakening any client
- * threads waiting on the callback object.
- *
* The CallbackBase class implements some of the base synchronization common to
* both PrepareModelCallback and ExecutionCallback. For consistency, any HIDL
* callback class must inherit from CallbackBase as well as the HIDL callback
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 4f9d528..ed1fb94 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -242,8 +242,8 @@
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_1(model, preparedModelCallback);
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.1/IDevice.hal b/neuralnetworks/1.1/IDevice.hal
index ca22555..1335bde 100644
--- a/neuralnetworks/1.1/IDevice.hal
+++ b/neuralnetworks/1.1/IDevice.hal
@@ -41,7 +41,7 @@
/**
* Gets the supported operations in a model.
*
- * getSupportedSubgraph indicates which operations of a model are fully
+ * getSupportedOperations indicates which operations of a model are fully
* supported by the vendor driver. If an operation may not be supported for
* any reason, getSupportedOperations must return false for that operation.
*
@@ -102,6 +102,8 @@
* Multiple threads can call prepareModel on the same model concurrently.
*
* @param model The model to be prepared for execution.
+ * @param preference Indicates the intended execution behavior of a prepared
+ * model.
* @param callback A callback object used to return the error status of
* preparing the model for execution and the prepared model
* if successful, nullptr otherwise. The callback object's
@@ -115,6 +117,7 @@
* - INVALID_ARGUMENT if one of the input arguments is
* invalid
*/
- prepareModel_1_1(Model model, IPreparedModelCallback callback)
+ prepareModel_1_1(Model model, ExecutionPreference preference,
+ IPreparedModelCallback callback)
generates (ErrorStatus status);
};
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index 1d470d6..8290fbb 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -27,25 +27,24 @@
*/
enum OperationType : @1.0::OperationType {
/**
- * BatchToSpace for N-D tensors.
+ * BatchToSpace for N-dimensional tensors.
*
- * This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape
+ * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape
* block_shape + [batch], interleaves these blocks back into the grid defined by the
* spatial dimensions [1, ..., M], to obtain a result with the same rank as the input.
- * The spatial dimensions of this intermediate result are then optionally cropped
- * according to the amount to crop to produce the output.
+ *
* This is the reverse of SpaceToBatch.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
- * Supported tensor rank: up to 4
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4
*
* Inputs:
- * 0: An n-D tensor, specifying the input.
+ * 0: An n-D tensor, specifying the tensor to be reshaped.
* 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
* input tensor. All values must be >= 1.
- * 2: A 1-D Tensor of type TENSOR_INT32, the amount to crop for each spatial diemension of the
- * input tensor. All values must be >= 0.
*
* Outputs:
* 0: A tensor of the same type as input0.
@@ -53,9 +52,9 @@
BATCH_TO_SPACE_ND = 29,
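
A worked shape example of the description above (hypothetical sizes,
illustrative only): the batch is divided by the product of the block sizes and
each spatial dimension is multiplied by its block size.

    #include <cstdint>

    struct Nhwc { uint32_t batches, height, width, depth; };

    // E.g. {4, 1, 1, 1} with blocks {2, 2} -> {1, 2, 2, 1}.
    Nhwc batchToSpaceOutShape(Nhwc in, uint32_t blockH, uint32_t blockW) {
        return {in.batches / (blockH * blockW), in.height * blockH,
                in.width * blockW, in.depth};
    }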
/**
- * Divides the second tensor from the first tensor, element-wise.
+ * Element-wise division of two tensors.
*
- * Takes two input tensors of identical OperandType and compatible dimensions. The output
+ * Takes two input tensors of identical type and compatible dimensions. The output
* is the result of dividing the first input tensor by the second, optionally
* modified by an activation function.
*
@@ -71,7 +70,9 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
@@ -88,15 +89,17 @@
/**
* Computes the mean of elements across dimensions of a tensor.
*
- * Reduces input tensor along the dimensions given in axis. Unless keep_dims is true,
- * the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is
- * true, the reduced dimensions are retained with length 1.
+ * Reduces the input tensor along the given dimensions. Unless keep_dims
+ * is true, the rank of the tensor is reduced by 1 for each entry in axis.
+ * If keep_dims is true, the reduced dimensions are retained with length 1.
*
- * If axis has no entries, all dimensions are reduced, and a tensor with a single
- * element is returned.
+ * If the dimensions to reduce have no entries, all dimensions are reduced, and a tensor with
+ * a single element is returned.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
@@ -115,14 +118,18 @@
*
* This operation pads a tensor according to the specified paddings.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: An n-D tensor, specifying the input.
- * 1: A 2-D Tensor of type TENSOR_INT32. The paddings, before and after for each spatial dimension
- * of the input tensor.
+ * 0: An n-D tensor, specifying the tensor to be padded.
+ * 1: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
+ * input tensor. The shape of the tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded at the front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
*
* Outputs:
* 0: A tensor of the same type as input0.
@@ -130,7 +137,7 @@
PAD = 32,
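
Output dimensions implied by the padding layout documented above (paddings has
shape {rank(input0), 2}); a sketch:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> padOutDims(
            const std::vector<uint32_t>& in,
            const std::vector<std::array<uint32_t, 2>>& pad) {
        std::vector<uint32_t> out(in.size());
        for (size_t i = 0; i < in.size(); ++i)
            out[i] = in[i] + pad[i][0] + pad[i][1];  // front + back padding
        return out;
    }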
/**
- * SpaceToBatch for N-D tensors.
+ * SpaceToBatch for N-dimensional tensors.
*
* This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
* of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
@@ -139,16 +146,20 @@
* batch position. Prior to division into blocks, the spatial dimensions of the input are
* optionally zero padded according to paddings.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
- * Supported tensor rank: up to 4
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
* input tensor. All values must be >= 1.
 * 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
- * input tensor. All values must be >= 0.
+ * input tensor. All values must be >= 0. The shape of the tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded at the front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
*
* Outputs:
* 0: A tensor of the same type as input0.
@@ -160,17 +171,20 @@
*
* Given a tensor input, this operation returns a tensor of the same type with all
* dimensions of size 1 removed. If you don't want to remove all size 1 dimensions,
- * you can remove specific size 1 dimensions by specifying axis.
+ * you can remove specific size 1 dimensions by specifying the axes (input1).
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: An n-D tensor, specifying the input.
- * 1: An 1-D Tensor of type TENSOR_INT32. The dimensions to squeeze. If None (the default),
- * squeezes all dimensions. If specified, only squeezes the dimensions listed. The dimension
- * index starts at 0. It is an error to squeeze a dimension that is not 1.
+ * 0: An n-D tensor, the tensor to be squeezed.
+ * 1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified,
+ * only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
+ * The dimension index starts at 0. An error must be reported if squeezing a dimension that
+ * is not 1.
*
* Outputs:
* 0: A tensor of the same type as input0. Contains the same data as input, but has one or more
@@ -181,23 +195,25 @@
/**
* Extracts a strided slice of a tensor.
*
- * This op extracts a slice of size (end-begin)/stride from the given input tensor.
- * Starting at the location specified by begin the slice continues by adding
+ * Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given
+ * input tensor. Starting at the location specified by begin the slice continues by adding
* stride to the index until all dimensions are not less than end. Note that a stride can
* be negative, which causes a reverse slice.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: An n-D tensor, specifying the input.
+ * 0: An n-D tensor, specifying the tensor to be sliced.
* 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input
- * tensor to be sliced.
+ * tensor to be sliced. The length must be equal to rank(input0).
* 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input
- * tensor to be sliced.
+ * tensor to be sliced. The length must be equal to rank(input0).
* 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
- * tensor to be sliced.
+ * tensor to be sliced. The length must be equal to rank(input0).
*
* Outputs:
* 0: A tensor of the same type as input0.
@@ -205,7 +221,7 @@
STRIDED_SLICE = 35,
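
The per-dimension output size implied by the begin/end/stride description
above, for the positive-stride case (a sketch; negative strides reverse the
slice):

    #include <cstdint>

    // ceil((end - begin) / stride) for stride > 0.
    int32_t sliceDimSize(int32_t begin, int32_t end, int32_t stride) {
        return (end - begin + stride - 1) / stride;
    }
    // E.g. begin = 1, end = 7, stride = 2 -> indices {1, 3, 5} -> size 3.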
/**
- * Subtracts the second tensor from the first tensor, element-wise.
+ * Element-wise subtraction of two tensors.
*
* Takes two input tensors of identical type and compatible dimensions. The output
* is the result of subtracting the second input tensor from the first one, optionally
@@ -223,7 +239,9 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
@@ -240,18 +258,20 @@
/**
* Transposes the input tensor, permuting the dimensions according to the perm tensor.
*
- * The returned tensor's dimension i must correspond to the input dimension perm[i].
+ * The returned tensor's dimension i corresponds to the input dimension perm[i].
* If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor.
* Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors.
*
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * 0: An n-D tensor, specifying the input.
- * 1: A 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the input
- * tensor.
+ * 0: An n-D tensor, specifying the tensor to be transposed.
+ * 1: An optional 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the
+ * input tensor.
*
* Outputs:
* 0: A tensor of the same type as input0.
@@ -362,3 +382,24 @@
*/
bool relaxComputationFloat32toFloat16;
};
+
+/**
+ * Execution preferences.
+ */
+enum ExecutionPreference : int32_t {
+ /**
+ * Prefer executing in a way that minimizes battery drain.
+ * This is desirable for compilations that will be executed often.
+ */
+ LOW_POWER = 0,
+ /**
+ * Prefer returning a single answer as fast as possible, even if this causes
+ * more power consumption.
+ */
+ FAST_SINGLE_ANSWER = 1,
+ /**
+ * Prefer maximizing the throughput of successive frames, for example when
+ * processing successive frames coming from the camera.
+ */
+ SUSTAINED_SPEED = 2,
+};
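
How a client passes the new preference to the extended prepareModel_1_1, as at
the VTS call sites updated in this change (a sketch; callback wiring and error
handling elided):

    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    Return<ErrorStatus> status = device->prepareModel_1_1(
        model, ExecutionPreference::LOW_POWER,  // e.g. for frequent executions
        callback);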
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index 7a20e26..3aa55f8 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -50,13 +50,13 @@
}
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
- const V1_1::Model& model) {
+ const V1_1::Model& model, ExecutionPreference preference) {
SCOPED_TRACE(message + " [prepareModel_1_1]");
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_1(model, preparedModelCallback);
+ device->prepareModel_1_1(model, preference, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -67,15 +67,24 @@
ASSERT_EQ(nullptr, preparedModel.get());
}
+static bool validExecutionPreference(ExecutionPreference preference) {
+ return preference == ExecutionPreference::LOW_POWER ||
+ preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == ExecutionPreference::SUSTAINED_SPEED;
+}
+
// Primary validation function. This function will take a valid model, apply a
// mutation to it to invalidate the model, then pass it to interface calls that
// use the model. Note that the model here is passed by value, and any mutation
// to the model does not leave this function.
static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model,
- const std::function<void(Model*)>& mutation) {
+ const std::function<void(Model*)>& mutation,
+ ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
mutation(&model);
- validateGetSupportedOperations(device, message, model);
- validatePrepareModel(device, message, model);
+ if (validExecutionPreference(preference)) {
+ validateGetSupportedOperations(device, message, model);
+ }
+ validatePrepareModel(device, message, model, preference);
}
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
@@ -486,6 +495,22 @@
}
}
+///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
+
+static const int32_t invalidExecutionPreferences[] = {
+ static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
+ static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
+};
+
+static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1::Model& model) {
+ for (int32_t preference : invalidExecutionPreferences) {
+ const std::string message =
+ "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+ validate(device, message, model, [](Model*) {},
+ static_cast<ExecutionPreference>(preference));
+ }
+}
+
////////////////////////// ENTRY POINT //////////////////////////////
void ValidationTest::validateModel(const V1_1::Model& model) {
@@ -503,6 +528,7 @@
removeOperationOutputTest(device, model);
addOperationInputTest(device, model);
addOperationOutputTest(device, model);
+ mutateExecutionPreferenceTest(device, model);
}
} // namespace functional
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index bd96614..b42f561 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -60,8 +60,8 @@
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_1(model, preparedModelCallback);
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
index ee130f8..0febd38 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
@@ -30,10 +30,8 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier}};
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
+ .type = ScanType::ONE_SHOT, .interval = 60, .specifiers = {specifier}};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -42,9 +40,9 @@
EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);
ALOGI("startNetworkScan, rspInfo.error = %s\n", toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::SIM_ABSENT}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::NONE}));
}
}
@@ -55,9 +53,8 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidArgument) {
const int serial = GetRandomSerialNumber();
- V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60};
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {.type = ScanType::ONE_SHOT,
+ .interval = 60};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -67,10 +64,10 @@
ALOGI("startNetworkScan_InvalidArgument, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -87,7 +84,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 4,
.specifiers = {specifier},
@@ -103,10 +100,10 @@
ALOGI("startNetworkScan_InvalidInterval1, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -123,7 +120,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 301,
.specifiers = {specifier},
@@ -139,10 +136,10 @@
ALOGI("startNetworkScan_InvalidInterval2, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -159,7 +156,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
.specifiers = {specifier},
@@ -175,10 +172,10 @@
ALOGI("startNetworkScan_InvalidMaxSearchTime1, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -195,7 +192,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
.specifiers = {specifier},
@@ -211,10 +208,10 @@
ALOGI("startNetworkScan_InvalidMaxSearchTime2, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -231,7 +228,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
.specifiers = {specifier},
@@ -247,10 +244,10 @@
ALOGI("startNetworkScan_InvalidPeriodicity1, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -267,7 +264,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
.specifiers = {specifier},
@@ -283,10 +280,10 @@
ALOGI("startNetworkScan_InvalidPeriodicity2, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(
CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
}
@@ -303,7 +300,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
.specifiers = {specifier},
@@ -319,10 +316,10 @@
ALOGI("startNetworkScan_InvalidArgument, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::NONE, RadioError::SIM_ABSENT}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::NONE}));
}
}
@@ -338,7 +335,7 @@
.geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
.channels = {1,2}};
- V1_2::NetworkScanRequest request = {
+ ::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
.specifiers = {specifier},
@@ -355,10 +352,10 @@
ALOGI("startNetworkScan_InvalidArgument, rspInfo.error = %s\n",
toString(radioRsp_v1_2->rspInfo.error).c_str());
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
{RadioError::NONE, RadioError::SIM_ABSENT}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::NONE}));
}
}
@@ -369,8 +366,8 @@
TEST_F(RadioHidlTest_v1_2, setIndicationFilter_1_2) {
const int serial = GetRandomSerialNumber();
- Return<void> res =
- radio_v1_2->setIndicationFilter_1_2(serial, static_cast<int>(IndicationFilter::ALL));
+ Return<void> res = radio_v1_2->setIndicationFilter_1_2(
+ serial, static_cast<int>(::android::hardware::radio::V1_2::IndicationFilter::ALL));
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -390,7 +387,7 @@
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
serial, 5000,
10, // hysteresisDb too large given threshold list deltas
- {-109, -103, -97, -89}, V1_2::AccessNetwork::GERAN);
+ {-109, -103, -97, -89}, ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -407,8 +404,8 @@
TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_EmptyParams) {
const int serial = GetRandomSerialNumber();
- Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(serial, 0, 0, {},
- V1_2::AccessNetwork::GERAN);
+ Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
+ serial, 0, 0, {}, ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -426,7 +423,8 @@
const int serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
- serial, 5000, 2, {-109, -103, -97, -89}, V1_2::AccessNetwork::GERAN);
+ serial, 5000, 2, {-109, -103, -97, -89},
+ ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -444,7 +442,8 @@
const int serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
- serial, 5000, 2, {-110, -97, -73, -49, -25}, V1_2::AccessNetwork::UTRAN);
+ serial, 5000, 2, {-110, -97, -73, -49, -25},
+ ::android::hardware::radio::V1_2::AccessNetwork::UTRAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -462,7 +461,8 @@
const int serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
- serial, 5000, 2, {-140, -128, -118, -108, -98, -44}, V1_2::AccessNetwork::EUTRAN);
+ serial, 5000, 2, {-140, -128, -118, -108, -98, -44},
+ ::android::hardware::radio::V1_2::AccessNetwork::EUTRAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -480,7 +480,8 @@
const int serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
- serial, 5000, 2, {-105, -90, -75, -65}, V1_2::AccessNetwork::CDMA2000);
+ serial, 5000, 2, {-105, -90, -75, -65},
+ ::android::hardware::radio::V1_2::AccessNetwork::CDMA2000);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -500,7 +501,8 @@
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
serial, 5000,
5000, // hysteresisDlKbps too big for thresholds delta
- 100, {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000}, V1_2::AccessNetwork::GERAN);
+ 100, {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000},
+ ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -520,7 +522,8 @@
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
serial, 5000, 500,
1000, // hysteresisUlKbps too big for thresholds delta
- {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000}, V1_2::AccessNetwork::GERAN);
+ {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000},
+ ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -537,8 +540,8 @@
TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_emptyParams) {
const int serial = GetRandomSerialNumber();
- Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(serial, 0, 0, 0, {}, {},
- V1_2::AccessNetwork::GERAN);
+ Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
+ serial, 0, 0, 0, {}, {}, ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -557,7 +560,7 @@
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
serial, 5000, 500, 100, {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000},
- V1_2::AccessNetwork::GERAN);
+ ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -574,7 +577,8 @@
TEST_F(RadioHidlTest_v1_2, setupDataCall_1_2) {
const int serial = GetRandomSerialNumber();
- V1_2::AccessNetwork accessNetwork = V1_2::AccessNetwork::EUTRAN;
+ ::android::hardware::radio::V1_2::AccessNetwork accessNetwork =
+ ::android::hardware::radio::V1_2::AccessNetwork::EUTRAN;
DataProfileInfo dataProfileInfo;
memset(&dataProfileInfo, 0, sizeof(dataProfileInfo));
@@ -600,7 +604,8 @@
bool roamingAllowed = false;
bool isRoaming = false;
- V1_2::DataRequestReason reason = V1_2::DataRequestReason::NORMAL;
+ ::android::hardware::radio::V1_2::DataRequestReason reason =
+ ::android::hardware::radio::V1_2::DataRequestReason::NORMAL;
std::vector<hidl_string> addresses = {""};
std::vector<hidl_string> dnses = {""};
@@ -613,12 +618,12 @@
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(
radioRsp_v1_2->rspInfo.error,
{RadioError::SIM_ABSENT, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_ARGUMENTS,
RadioError::OP_NOT_ALLOWED_BEFORE_REG_TO_NW, RadioError::REQUEST_NOT_SUPPORTED}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(CheckAnyOfErrors(
radioRsp_v1_2->rspInfo.error,
{RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_ARGUMENTS,
@@ -632,7 +637,8 @@
TEST_F(RadioHidlTest_v1_2, deactivateDataCall_1_2) {
const int serial = GetRandomSerialNumber();
int cid = 1;
- V1_2::DataRequestReason reason = V1_2::DataRequestReason::NORMAL;
+ ::android::hardware::radio::V1_2::DataRequestReason reason =
+ ::android::hardware::radio::V1_2::DataRequestReason::NORMAL;
Return<void> res = radio_v1_2->deactivateDataCall_1_2(serial, cid, reason);
ASSERT_OK(res);
@@ -641,13 +647,13 @@
EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);
- if (cardStatus.cardState == CardState::ABSENT) {
+ if (cardStatus.base.cardState == CardState::ABSENT) {
ASSERT_TRUE(CheckAnyOfErrors(
radioRsp_v1_2->rspInfo.error,
{RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_CALL_ID,
RadioError::INVALID_STATE, RadioError::INVALID_ARGUMENTS,
RadioError::REQUEST_NOT_SUPPORTED, RadioError::CANCELLED, RadioError::SIM_ABSENT}));
- } else if (cardStatus.cardState == CardState::PRESENT) {
+ } else if (cardStatus.base.cardState == CardState::PRESENT) {
ASSERT_TRUE(CheckAnyOfErrors(
radioRsp_v1_2->rspInfo.error,
{RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_CALL_ID,
@@ -710,3 +716,33 @@
radioRsp_v1_2->rspInfo.error,
{RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::NOT_PROVISIONED}));
}
+
+/*
+ * Test IRadio.getAvailableBandModes() for the response returned.
+ */
+TEST_F(RadioHidlTest_v1_2, getAvailableBandModes) {
+ int serial = GetRandomSerialNumber();
+
+ Return<void> res = radio_v1_2->getAvailableBandModes(serial);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);
+ ALOGI("getAvailableBandModes, rspInfo.error = %s\n",
+ toString(radioRsp_v1_2->rspInfo.error).c_str());
+ ASSERT_TRUE(
+ CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
+ {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::MODEM_ERR,
+ RadioError::INTERNAL_ERR,
+ // If REQUEST_NOT_SUPPORTED is returned, then it should also be returned
+ // for setBandMode().
+ RadioError::REQUEST_NOT_SUPPORTED}));
+ bool hasUnspecifiedBandMode = false;
+ if (radioRsp_v1_2->rspInfo.error == RadioError::NONE) {
+ for (const RadioBandMode& mode : radioRsp_v1_2->radioBandModes) {
+ // Automatic mode selection must be supported
+ if (mode == RadioBandMode::BAND_MODE_UNSPECIFIED) hasUnspecifiedBandMode = true;
+ }
+ ASSERT_TRUE(hasUnspecifiedBandMode);
+ }
+}
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_test.cpp b/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
index d74d077..edac1aa 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
@@ -17,14 +17,18 @@
#include <radio_hidl_hal_utils_v1_2.h>
void RadioHidlTest_v1_2::SetUp() {
- radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<V1_2::IRadio>(
- RadioHidlEnvironment::Instance()->getServiceName<V1_2::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_2 =
+ ::testing::VtsHalHidlTargetTestBase::getService<::android::hardware::radio::V1_2::IRadio>(
+ RadioHidlEnvironment::Instance()
+ ->getServiceName<::android::hardware::radio::V1_2::IRadio>(
+ hidl_string(RADIO_SERVICE_NAME)));
if (radio_v1_2 == NULL) {
sleep(60);
- radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<V1_2::IRadio>(
- RadioHidlEnvironment::Instance()->getServiceName<V1_2::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<
+ ::android::hardware::radio::V1_2::IRadio>(
+ RadioHidlEnvironment::Instance()
+ ->getServiceName<::android::hardware::radio::V1_2::IRadio>(
+ hidl_string(RADIO_SERVICE_NAME)));
}
ASSERT_NE(nullptr, radio_v1_2.get());
@@ -71,4 +75,4 @@
}
count_--;
return status;
-}
+}
\ No newline at end of file
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h b/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
index c61913c..2d0ea29 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
+++ b/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
@@ -22,14 +22,14 @@
#include <condition_variable>
#include <mutex>
-#include <android/hardware/radio/1.1/IRadioIndication.h>
-#include <android/hardware/radio/1.1/IRadioResponse.h>
#include <android/hardware/radio/1.2/IRadio.h>
+#include <android/hardware/radio/1.2/IRadioIndication.h>
+#include <android/hardware/radio/1.2/IRadioResponse.h>
#include <android/hardware/radio/1.2/types.h>
#include "vts_test_util.h"
-using namespace ::android::hardware::radio;
+using namespace ::android::hardware::radio::V1_2;
using namespace ::android::hardware::radio::V1_1;
using namespace ::android::hardware::radio::V1_0;
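+// Several types (e.g. CardStatus, CellInfo, SignalStrength) exist in more than one
+// of these versions, so ambiguous uses are fully qualified throughout this file.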
@@ -44,21 +44,24 @@
#define RADIO_SERVICE_NAME "slot1"
class RadioHidlTest_v1_2;
-extern CardStatus cardStatus;
+extern ::android::hardware::radio::V1_2::CardStatus cardStatus;
/* Callback class for radio response v1_2 */
-class RadioResponse_v1_2 : public V1_1::IRadioResponse {
+class RadioResponse_v1_2 : public ::android::hardware::radio::V1_2::IRadioResponse {
protected:
RadioHidlTest_v1_2& parent_v1_2;
public:
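+ // Filled in by getAvailableBandModesResponse(); read by the getAvailableBandModes test.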
+ hidl_vec<RadioBandMode> radioBandModes;
+
RadioResponseInfo rspInfo;
RadioResponse_v1_2(RadioHidlTest_v1_2& parent_v1_2);
virtual ~RadioResponse_v1_2() = default;
- Return<void> getIccCardStatusResponse(const RadioResponseInfo& info,
- const CardStatus& cardStatus);
+ Return<void> getIccCardStatusResponse(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_0::CardStatus& cardStatus);
Return<void> supplyIccPinForAppResponse(const RadioResponseInfo& info,
int32_t remainingRetries);
@@ -81,8 +84,9 @@
Return<void> supplyNetworkDepersonalizationResponse(const RadioResponseInfo& info,
int32_t remainingRetries);
- Return<void> getCurrentCallsResponse(const RadioResponseInfo& info,
- const ::android::hardware::hidl_vec<Call>& calls);
+ Return<void> getCurrentCallsResponse(
+ const RadioResponseInfo& info,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::Call>& calls);
Return<void> dialResponse(const RadioResponseInfo& info);
@@ -104,14 +108,17 @@
Return<void> getLastCallFailCauseResponse(const RadioResponseInfo& info,
const LastCallFailCauseInfo& failCauseInfo);
- Return<void> getSignalStrengthResponse(const RadioResponseInfo& info,
- const SignalStrength& sigStrength);
+ Return<void> getSignalStrengthResponse(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_0::SignalStrength& sigStrength);
- Return<void> getVoiceRegistrationStateResponse(const RadioResponseInfo& info,
- const VoiceRegStateResult& voiceRegResponse);
+ Return<void> getVoiceRegistrationStateResponse(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_0::VoiceRegStateResult& voiceRegResponse);
- Return<void> getDataRegistrationStateResponse(const RadioResponseInfo& info,
- const DataRegStateResult& dataRegResponse);
+ Return<void> getDataRegistrationStateResponse(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_0::DataRegStateResult& dataRegResponse);
Return<void> getOperatorResponse(const RadioResponseInfo& info,
const ::android::hardware::hidl_string& longName,
@@ -310,8 +317,9 @@
Return<void> getVoiceRadioTechnologyResponse(const RadioResponseInfo& info,
RadioTechnology rat);
- Return<void> getCellInfoListResponse(const RadioResponseInfo& info,
- const ::android::hardware::hidl_vec<CellInfo>& cellInfo);
+ Return<void> getCellInfoListResponse(
+ const RadioResponseInfo& info,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& cellInfo);
Return<void> setCellInfoListRateResponse(const RadioResponseInfo& info);
@@ -406,27 +414,33 @@
Return<void> setLinkCapacityReportingCriteriaResponse(const RadioResponseInfo& info);
- Return<void> getIccCardStatusResponse_1_2(const RadioResponseInfo& info,
- const CardStatus& card_status);
+ Return<void> getIccCardStatusResponse_1_2(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_2::CardStatus& card_status);
- Return<void> getCurrentCallsResponse_1_2(const RadioResponseInfo& info,
- const ::android::hardware::hidl_vec<Call>& calls);
+ Return<void> getCurrentCallsResponse_1_2(
+ const RadioResponseInfo& info,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::Call>& calls);
- Return<void> getSignalStrengthResponse_1_2(const RadioResponseInfo& info,
- const SignalStrength& sig_strength);
+ Return<void> getSignalStrengthResponse_1_2(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_2::SignalStrength& sig_strength);
Return<void> getCellInfoListResponse_1_2(
- const RadioResponseInfo& info, const ::android::hardware::hidl_vec<CellInfo>& cellInfo);
+ const RadioResponseInfo& info,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& cellInfo);
Return<void> getVoiceRegistrationStateResponse_1_2(
- const RadioResponseInfo& info, const V1_2::VoiceRegStateResult& voiceRegResponse);
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_2::VoiceRegStateResult& voiceRegResponse);
Return<void> getDataRegistrationStateResponse_1_2(
- const RadioResponseInfo& info, const V1_2::DataRegStateResult& dataRegResponse);
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_2::DataRegStateResult& dataRegResponse);
};
/* Callback class for radio indication */
-class RadioIndication_v1_2 : public V1_1::IRadioIndication {
+class RadioIndication_v1_2 : public ::android::hardware::radio::V1_2::IRadioIndication {
protected:
RadioHidlTest_v1_2& parent_v1_2;
@@ -435,26 +449,33 @@
virtual ~RadioIndication_v1_2() = default;
/* 1.2 Api */
- Return<void> networkScanResult_1_2(RadioIndicationType type,
- const V1_2::NetworkScanResult& result);
+ Return<void> networkScanResult_1_2(
+ RadioIndicationType type,
+ const ::android::hardware::radio::V1_2::NetworkScanResult& result);
- Return<void> cellInfoList_1_2(RadioIndicationType type,
- const ::android::hardware::hidl_vec<V1_2::CellInfo>& records);
+ Return<void> cellInfoList_1_2(
+ RadioIndicationType type,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& records);
- Return<void> currentLinkCapacityEstimate(RadioIndicationType type,
- const V1_2::LinkCapacityEstimate& lce);
+ Return<void> currentLinkCapacityEstimate(
+ RadioIndicationType type,
+ const ::android::hardware::radio::V1_2::LinkCapacityEstimate& lce);
Return<void> currentPhysicalChannelConfigs(
RadioIndicationType type,
- const ::android::hardware::hidl_vec<V1_2::PhysicalChannelConfig>& configs);
+ const ::android::hardware::hidl_vec<
+ ::android::hardware::radio::V1_2::PhysicalChannelConfig>& configs);
- Return<void> currentSignalStrength_1_2(RadioIndicationType type,
- const V1_2::SignalStrength& signalStrength);
+ Return<void> currentSignalStrength_1_2(
+ RadioIndicationType type,
+ const ::android::hardware::radio::V1_2::SignalStrength& signalStrength);
/* 1.1 Api */
Return<void> carrierInfoForImsiEncryption(RadioIndicationType info);
- Return<void> networkScanResult(RadioIndicationType type, const NetworkScanResult& result);
+ Return<void> networkScanResult(
+ RadioIndicationType type,
+ const ::android::hardware::radio::V1_1::NetworkScanResult& result);
Return<void> keepaliveStatus(RadioIndicationType type, const KeepaliveStatus& status);
@@ -480,8 +501,9 @@
const ::android::hardware::hidl_string& nitzTime,
uint64_t receivedTime);
- Return<void> currentSignalStrength(RadioIndicationType type,
- const SignalStrength& signalStrength);
+ Return<void> currentSignalStrength(
+ RadioIndicationType type,
+ const ::android::hardware::radio::V1_0::SignalStrength& signalStrength);
Return<void> dataCallListChanged(
RadioIndicationType type, const ::android::hardware::hidl_vec<SetupDataCallResult>& dcList);
@@ -539,8 +561,9 @@
Return<void> voiceRadioTechChanged(RadioIndicationType type, RadioTechnology rat);
- Return<void> cellInfoList(RadioIndicationType type,
- const ::android::hardware::hidl_vec<CellInfo>& records);
+ Return<void> cellInfoList(
+ RadioIndicationType type,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& records);
Return<void> imsNetworkStateChanged(RadioIndicationType type);
@@ -575,7 +598,9 @@
static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
return instance;
}
- virtual void registerTestServices() override { registerTestService<V1_2::IRadio>(); }
+ virtual void registerTestServices() override {
+ registerTestService<::android::hardware::radio::V1_2::IRadio>();
+ }
private:
RadioHidlEnvironment() {}
@@ -598,11 +623,11 @@
std::cv_status wait();
/* radio service handle */
- sp<V1_2::IRadio> radio_v1_2;
+ sp<::android::hardware::radio::V1_2::IRadio> radio_v1_2;
/* radio response handle */
sp<RadioResponse_v1_2> radioRsp_v1_2;
/* radio indication handle */
sp<RadioIndication_v1_2> radioInd_v1_2;
-};
+};
\ No newline at end of file
diff --git a/radio/1.2/vts/functional/radio_indication.cpp b/radio/1.2/vts/functional/radio_indication.cpp
index 57f5cb0..eba9dc0 100644
--- a/radio/1.2/vts/functional/radio_indication.cpp
+++ b/radio/1.2/vts/functional/radio_indication.cpp
@@ -20,29 +20,33 @@
/* 1.2 Apis */
Return<void> RadioIndication_v1_2::networkScanResult_1_2(
- RadioIndicationType /*type*/, const V1_2::NetworkScanResult& /*result*/) {
+ RadioIndicationType /*type*/,
+ const ::android::hardware::radio::V1_2::NetworkScanResult& /*result*/) {
return Void();
}
Return<void> RadioIndication_v1_2::cellInfoList_1_2(
RadioIndicationType /*type*/,
- const ::android::hardware::hidl_vec<V1_2::CellInfo>& /*records*/) {
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& /*records*/) {
return Void();
}
Return<void> RadioIndication_v1_2::currentLinkCapacityEstimate(
- RadioIndicationType /*type*/, const V1_2::LinkCapacityEstimate& /*lce*/) {
+ RadioIndicationType /*type*/,
+ const ::android::hardware::radio::V1_2::LinkCapacityEstimate& /*lce*/) {
return Void();
}
Return<void> RadioIndication_v1_2::currentPhysicalChannelConfigs(
RadioIndicationType /*type*/,
- const ::android::hardware::hidl_vec<V1_2::PhysicalChannelConfig>& /*configs*/) {
+ const ::android::hardware::hidl_vec<
+ ::android::hardware::radio::V1_2::PhysicalChannelConfig>& /*configs*/) {
return Void();
}
Return<void> RadioIndication_v1_2::currentSignalStrength_1_2(
- RadioIndicationType /*type*/, const V1_2::SignalStrength& /*signalStrength*/) {
+ RadioIndicationType /*type*/,
+ const ::android::hardware::radio::V1_2::SignalStrength& /*signalStrength*/) {
return Void();
}
@@ -51,8 +55,9 @@
return Void();
}
-Return<void> RadioIndication_v1_2::networkScanResult(RadioIndicationType /*type*/,
- const NetworkScanResult& /*result*/) {
+Return<void> RadioIndication_v1_2::networkScanResult(
+ RadioIndicationType /*type*/,
+ const ::android::hardware::radio::V1_1::NetworkScanResult& /*result*/) {
return Void();
}
@@ -101,8 +106,9 @@
return Void();
}
-Return<void> RadioIndication_v1_2::currentSignalStrength(RadioIndicationType /*type*/,
- const SignalStrength& /*signalStrength*/) {
+Return<void> RadioIndication_v1_2::currentSignalStrength(
+ RadioIndicationType /*type*/,
+ const ::android::hardware::radio::V1_0::SignalStrength& /*signalStrength*/) {
return Void();
}
@@ -224,7 +230,8 @@
}
Return<void> RadioIndication_v1_2::cellInfoList(
- RadioIndicationType /*type*/, const ::android::hardware::hidl_vec<CellInfo>& /*records*/) {
+ RadioIndicationType /*type*/,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& /*records*/) {
return Void();
}
@@ -276,4 +283,4 @@
Return<void> RadioIndication_v1_2::modemReset(RadioIndicationType /*type*/,
const ::android::hardware::hidl_string& /*reason*/) {
return Void();
-}
+}
\ No newline at end of file
diff --git a/radio/1.2/vts/functional/radio_response.cpp b/radio/1.2/vts/functional/radio_response.cpp
index 9195689..85ec3e0 100644
--- a/radio/1.2/vts/functional/radio_response.cpp
+++ b/radio/1.2/vts/functional/radio_response.cpp
@@ -16,13 +16,14 @@
#include <radio_hidl_hal_utils_v1_2.h>
-CardStatus cardStatus;
+::android::hardware::radio::V1_2::CardStatus cardStatus;
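+// Written by getIccCardStatusResponse_1_2(); tests read cardStatus.base.cardState
+// to pick the set of acceptable errors.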
RadioResponse_v1_2::RadioResponse_v1_2(RadioHidlTest_v1_2& parent) : parent_v1_2(parent) {}
/* 1.0 Apis */
-Return<void> RadioResponse_v1_2::getIccCardStatusResponse(const RadioResponseInfo& /*info*/,
- const CardStatus& /*card_status*/) {
+Return<void> RadioResponse_v1_2::getIccCardStatusResponse(
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::radio::V1_0::CardStatus& /*card_status*/) {
return Void();
}
@@ -62,7 +63,8 @@
}
Return<void> RadioResponse_v1_2::getCurrentCallsResponse(
- const RadioResponseInfo& /*info*/, const ::android::hardware::hidl_vec<Call>& /*calls*/) {
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::Call>& /*calls*/) {
return Void();
}
@@ -107,18 +109,21 @@
return Void();
}
-Return<void> RadioResponse_v1_2::getSignalStrengthResponse(const RadioResponseInfo& /*info*/,
- const SignalStrength& /*sig_strength*/) {
+Return<void> RadioResponse_v1_2::getSignalStrengthResponse(
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::radio::V1_0::SignalStrength& /*sig_strength*/) {
return Void();
}
Return<void> RadioResponse_v1_2::getVoiceRegistrationStateResponse(
- const RadioResponseInfo& /*info*/, const VoiceRegStateResult& /*voiceRegResponse*/) {
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::radio::V1_0::VoiceRegStateResult& /*voiceRegResponse*/) {
return Void();
}
Return<void> RadioResponse_v1_2::getDataRegistrationStateResponse(
- const RadioResponseInfo& /*info*/, const DataRegStateResult& /*dataRegResponse*/) {
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::radio::V1_0::DataRegStateResult& /*dataRegResponse*/) {
return Void();
}
@@ -312,8 +317,10 @@
}
Return<void> RadioResponse_v1_2::getAvailableBandModesResponse(
- const RadioResponseInfo& /*info*/,
- const ::android::hardware::hidl_vec<RadioBandMode>& /*bandModes*/) {
+ const RadioResponseInfo& info, const ::android::hardware::hidl_vec<RadioBandMode>& bandModes) {
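+ // Unlike the surrounding 1.0 stubs, capture the response and wake the test thread.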
+ rspInfo = info;
+ radioBandModes = bandModes;
+ parent_v1_2.notify();
return Void();
}
@@ -515,7 +522,7 @@
Return<void> RadioResponse_v1_2::getCellInfoListResponse(
const RadioResponseInfo& /*info*/,
- const ::android::hardware::hidl_vec<CellInfo>& /*cellInfo*/) {
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& /*cellInfo*/) {
return Void();
}
@@ -704,8 +711,9 @@
return Void();
}
-Return<void> RadioResponse_v1_2::getIccCardStatusResponse_1_2(const RadioResponseInfo& info,
- const CardStatus& card_status) {
+Return<void> RadioResponse_v1_2::getIccCardStatusResponse_1_2(
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_2::CardStatus& card_status) {
rspInfo = info;
cardStatus = card_status;
parent_v1_2.notify();
@@ -713,32 +721,37 @@
}
Return<void> RadioResponse_v1_2::getCurrentCallsResponse_1_2(
- const RadioResponseInfo& info, const ::android::hardware::hidl_vec<Call>& /*calls*/) {
+ const RadioResponseInfo& info,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::Call>& /*calls*/) {
rspInfo = info;
parent_v1_2.notify();
return Void();
}
Return<void> RadioResponse_v1_2::getSignalStrengthResponse_1_2(
- const RadioResponseInfo& info, const SignalStrength& /*sig_strength*/) {
+ const RadioResponseInfo& info,
+ const ::android::hardware::radio::V1_2::SignalStrength& /*sig_strength*/) {
rspInfo = info;
parent_v1_2.notify();
return Void();
}
Return<void> RadioResponse_v1_2::getCellInfoListResponse_1_2(
- const RadioResponseInfo& info, const ::android::hardware::hidl_vec<CellInfo>& /*cellInfo*/) {
+ const RadioResponseInfo& info,
+ const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& /*cellInfo*/) {
rspInfo = info;
parent_v1_2.notify();
return Void();
}
Return<void> RadioResponse_v1_2::getVoiceRegistrationStateResponse_1_2(
- const RadioResponseInfo& /*info*/, const V1_2::VoiceRegStateResult& /*voiceRegResponse*/) {
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::radio::V1_2::VoiceRegStateResult& /*voiceRegResponse*/) {
return Void();
}
Return<void> RadioResponse_v1_2::getDataRegistrationStateResponse_1_2(
- const RadioResponseInfo& /*info*/, const V1_2::DataRegStateResult& /*dataRegResponse*/) {
+ const RadioResponseInfo& /*info*/,
+ const ::android::hardware::radio::V1_2::DataRegStateResult& /*dataRegResponse*/) {
return Void();
-}
+}
\ No newline at end of file