Merge "graphics/common: add stable AIDL types"
diff --git a/audio/5.0/config/api/current.txt b/audio/5.0/config/api/current.txt
index c665781..a1d8e1e 100644
--- a/audio/5.0/config/api/current.txt
+++ b/audio/5.0/config/api/current.txt
@@ -237,6 +237,7 @@
method public audio.policy.configuration.V5_0.GainMode getMode();
method public String getName();
method public int getStepValueMB();
+ method public boolean getUseForVolume();
method public void setChannel_mask(String);
method public void setDefaultValueMB(int);
method public void setMaxRampMs(int);
@@ -246,6 +247,7 @@
method public void setMode(audio.policy.configuration.V5_0.GainMode);
method public void setName(String);
method public void setStepValueMB(int);
+ method public void setUseForVolume(boolean);
}
public class GlobalConfiguration {
diff --git a/audio/5.0/config/audio_policy_configuration.xsd b/audio/5.0/config/audio_policy_configuration.xsd
index 2e1a722..284d2e2 100644
--- a/audio/5.0/config/audio_policy_configuration.xsd
+++ b/audio/5.0/config/audio_policy_configuration.xsd
@@ -446,6 +446,7 @@
<xs:attribute name="stepValueMB" type="xs:int" use="optional"/>
<xs:attribute name="minRampMs" type="xs:int" use="optional"/>
<xs:attribute name="maxRampMs" type="xs:int" use="optional"/>
+ <xs:attribute name="useForVolume" type="xs:boolean" use="optional"/>
</xs:complexType>
</xs:element>
</xs:sequence>
diff --git a/audio/6.0/config/api/current.txt b/audio/6.0/config/api/current.txt
index e67831c..ddd4d1c 100644
--- a/audio/6.0/config/api/current.txt
+++ b/audio/6.0/config/api/current.txt
@@ -237,6 +237,7 @@
method public audio.policy.configuration.V6_0.GainMode getMode();
method public String getName();
method public int getStepValueMB();
+ method public boolean getUseForVolume();
method public void setChannel_mask(String);
method public void setDefaultValueMB(int);
method public void setMaxRampMs(int);
@@ -246,6 +247,7 @@
method public void setMode(audio.policy.configuration.V6_0.GainMode);
method public void setName(String);
method public void setStepValueMB(int);
+ method public void setUseForVolume(boolean);
}
public class GlobalConfiguration {
diff --git a/audio/6.0/config/audio_policy_configuration.xsd b/audio/6.0/config/audio_policy_configuration.xsd
index 29f6f38..3fab7dc 100644
--- a/audio/6.0/config/audio_policy_configuration.xsd
+++ b/audio/6.0/config/audio_policy_configuration.xsd
@@ -447,6 +447,7 @@
<xs:attribute name="stepValueMB" type="xs:int" use="optional"/>
<xs:attribute name="minRampMs" type="xs:int" use="optional"/>
<xs:attribute name="maxRampMs" type="xs:int" use="optional"/>
+ <xs:attribute name="useForVolume" type="xs:boolean" use="optional"/>
</xs:complexType>
</xs:element>
</xs:sequence>
diff --git a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
index c56445c..30f8a7a 100644
--- a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
@@ -73,9 +73,12 @@
getCachedPolicyConfig().getModuleFromName(std::get<PARAM_DEVICE_NAME>(device));
for (const auto& ioProfile : module->getOutputProfiles()) {
for (const auto& profile : ioProfile->getAudioProfiles()) {
- auto configs = ConfigHelper::combineAudioConfig(profile->getChannels(),
- profile->getSampleRates(),
- profile->getFormat());
+ const auto& channels = profile->getChannels();
+ const auto& sampleRates = profile->getSampleRates();
+ auto configs = ConfigHelper::combineAudioConfig(
+ vector<audio_channel_mask_t>(channels.begin(), channels.end()),
+ vector<uint32_t>(sampleRates.begin(), sampleRates.end()),
+ profile->getFormat());
auto flags = ioProfile->getFlags();
for (auto& config : configs) {
// Some combinations of flags declared in the config file require special
@@ -125,9 +128,12 @@
getCachedPolicyConfig().getModuleFromName(std::get<PARAM_DEVICE_NAME>(device));
for (const auto& ioProfile : module->getInputProfiles()) {
for (const auto& profile : ioProfile->getAudioProfiles()) {
- auto configs = ConfigHelper::combineAudioConfig(profile->getChannels(),
- profile->getSampleRates(),
- profile->getFormat());
+ const auto& channels = profile->getChannels();
+ const auto& sampleRates = profile->getSampleRates();
+ auto configs = ConfigHelper::combineAudioConfig(
+ vector<audio_channel_mask_t>(channels.begin(), channels.end()),
+ vector<uint32_t>(sampleRates.begin(), sampleRates.end()),
+ profile->getFormat());
for (const auto& config : configs) {
result.emplace_back(device, config, AudioInputFlag(ioProfile->getFlags()));
}
diff --git a/audio/core/all-versions/vts/functional/ConfigHelper.h b/audio/core/all-versions/vts/functional/ConfigHelper.h
index 604c0c5..48aae8c 100644
--- a/audio/core/all-versions/vts/functional/ConfigHelper.h
+++ b/audio/core/all-versions/vts/functional/ConfigHelper.h
@@ -79,8 +79,8 @@
return {};
}
- static vector<AudioConfig> combineAudioConfig(android::ChannelMaskSet channelMasks,
- android::SampleRateSet sampleRates,
+ static vector<AudioConfig> combineAudioConfig(vector<audio_channel_mask_t> channelMasks,
+ vector<uint32_t> sampleRates,
audio_format_t format) {
vector<AudioConfig> configs;
configs.reserve(channelMasks.size() * sampleRates.size());
diff --git a/automotive/can/1.0/default/Android.bp b/automotive/can/1.0/default/Android.bp
index 8aa1d6b..ee2e92b 100644
--- a/automotive/can/1.0/default/Android.bp
+++ b/automotive/can/1.0/default/Android.bp
@@ -39,6 +39,7 @@
"CanBus.cpp",
"CanBusNative.cpp",
"CanBusVirtual.cpp",
+ "CanBusSlcan.cpp",
"CanController.cpp",
"CanSocket.cpp",
"CloseHandle.cpp",
diff --git a/automotive/can/1.0/default/CanBus.cpp b/automotive/can/1.0/default/CanBus.cpp
index 38a9974..42d2e3c 100644
--- a/automotive/can/1.0/default/CanBus.cpp
+++ b/automotive/can/1.0/default/CanBus.cpp
@@ -30,9 +30,7 @@
namespace V1_0 {
namespace implementation {
-/**
- * Whether to log sent/received packets.
- */
+/** Whether to log sent/received packets. */
static constexpr bool kSuperVerbose = false;
Return<Result> CanBus::send(const CanMessage& message) {
@@ -85,6 +83,8 @@
return {};
}
+CanBus::CanBus() {}
+
CanBus::CanBus(const std::string& ifname) : mIfname(ifname) {}
CanBus::~CanBus() {
diff --git a/automotive/can/1.0/default/CanBus.h b/automotive/can/1.0/default/CanBus.h
index 30a2924..365e90c 100644
--- a/automotive/can/1.0/default/CanBus.h
+++ b/automotive/can/1.0/default/CanBus.h
@@ -48,12 +48,22 @@
bool down();
protected:
+ /**
+ * Blank constructor, since some interface types (such as SLCAN) don't get a name until after
+ * being initialized.
+ *
+ * If using this constructor, you MUST initialize mIfname prior to the completion of preUp().
+ */
+ CanBus();
+
CanBus(const std::string& ifname);
/**
* Prepare the SocketCAN interface.
*
* After calling this method, mIfname network interface is available and ready to be brought up.
+ *
+ * \return OK on success, or an error state on failure (see ICanController::Result).
*/
virtual ICanController::Result preUp();
@@ -61,11 +71,13 @@
* Cleanup after bringing the interface down.
*
* This is a counterpart to preUp().
+ *
+ * \return true upon success and false upon failure
*/
virtual bool postDown();
/** Network interface name. */
- const std::string mIfname;
+ std::string mIfname;
private:
struct CanMessageListener {
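
To make the constructor contract above concrete: a subclass built on the blank constructor must assign mIfname before preUp() returns. A minimal sketch, where CanBusFoo and its probing step are hypothetical and not part of this change:

    #include "CanBus.h"

    namespace android::hardware::automotive::can::V1_0::implementation {

    struct CanBusFoo : public CanBus {
        // The interface name is unknown at construction time, so use the blank constructor.
        CanBusFoo() : CanBus() {}

      protected:
        ICanController::Result preUp() override {
            // ... probe/initialize the device and learn its interface name ...
            mIfname = "can0";  // hypothetical; must be assigned before preUp() completes
            return ICanController::Result::OK;
        }
    };

    }  // namespace android::hardware::automotive::can::V1_0::implementation
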
diff --git a/automotive/can/1.0/default/CanBusSlcan.cpp b/automotive/can/1.0/default/CanBusSlcan.cpp
new file mode 100644
index 0000000..7dce838
--- /dev/null
+++ b/automotive/can/1.0/default/CanBusSlcan.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CanBusSlcan.h"
+
+#include <android-base/logging.h>
+#include <libnetdevice/can.h>
+#include <libnetdevice/libnetdevice.h>
+
+#include <fcntl.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <termios.h>
+
+#include <map>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace can {
+namespace V1_0 {
+namespace implementation {
+
+namespace slcanprotocol {
+static const std::string kOpenCommand = "O\r";
+static const std::string kCloseCommand = "C\r";
+static constexpr int kSlcanDiscipline = N_SLCAN;
+static constexpr int kDefaultDiscipline = N_TTY;
+
+static const std::map<uint32_t, std::string> kBitrateCommands = {
+ {10000, "C\rS0\r"}, {20000, "C\rS1\r"}, {50000, "C\rS2\r"},
+ {100000, "C\rS3\r"}, {125000, "C\rS4\r"}, {250000, "C\rS5\r"},
+ {500000, "C\rS6\r"}, {800000, "C\rS7\r"}, {1000000, "C\rS8\r"}};
+} // namespace slcanprotocol
+
+/**
+ * Serial Line CAN constructor
+ * \param uartName name of the SLCAN device (e.g. /dev/ttyUSB0)
+ * \param bitrate speed of the CAN bus (125k = MSCAN, 500k = HSCAN)
+ */
+CanBusSlcan::CanBusSlcan(const std::string& uartName, uint32_t bitrate)
+ : CanBus(), mUartName(uartName), kBitrate(bitrate) {}
+
+ICanController::Result CanBusSlcan::preUp() {
+ // verify valid bitrate and translate to serial command format
+ const auto lookupIt = slcanprotocol::kBitrateCommands.find(kBitrate);
+ if (lookupIt == slcanprotocol::kBitrateCommands.end()) {
+ return ICanController::Result::BAD_BAUDRATE;
+ }
+ const auto canBitrateCommand = lookupIt->second;
+
+ /* Attempt to open the uart in r/w without blocking or becoming the
+ * controlling terminal */
+ mFd = base::unique_fd(open(mUartName.c_str(), O_RDWR | O_NONBLOCK | O_NOCTTY));
+ if (!mFd.ok()) {
+ LOG(ERROR) << "SLCAN Failed to open " << mUartName << ": " << strerror(errno);
+ return ICanController::Result::BAD_ADDRESS;
+ }
+
+ // blank terminal settings and pull them from the device
+ struct termios terminalSettings = {};
+ if (tcgetattr(mFd.get(), &terminalSettings) < 0) {
+ LOG(ERROR) << "Failed to read attrs of" << mUartName << ": " << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ // change settings to raw mode
+ cfmakeraw(&terminalSettings);
+
+ // disable software flow control
+ terminalSettings.c_iflag &= ~IXOFF;
+ // enable hardware flow control
+ terminalSettings.c_cflag |= CRTSCTS;
+
+ struct serial_struct serialSettings;
+ // get serial settings
+ if (ioctl(mFd.get(), TIOCGSERIAL, &serialSettings) < 0) {
+ LOG(ERROR) << "Failed to read serial settings from " << mUartName << ": "
+ << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+ // set low latency mode
+ serialSettings.flags |= ASYNC_LOW_LATENCY;
+ // apply serial settings
+ if (ioctl(mFd.get(), TIOCSSERIAL, &serialSettings) < 0) {
+ LOG(ERROR) << "Failed to set low latency mode on " << mUartName << ": " << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ /* TCSADRAIN applies settings after we finish writing the rest of our
+ * changes (as opposed to TCSANOW, which changes immediately) */
+ if (tcsetattr(mFd.get(), TCSADRAIN, &terminalSettings) < 0) {
+ LOG(ERROR) << "Failed to apply terminal settings to " << mUartName << ": "
+ << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ // apply speed setting for CAN
+ if (write(mFd.get(), canBitrateCommand.c_str(), canBitrateCommand.length()) <= 0) {
+ LOG(ERROR) << "Failed to apply CAN bitrate: " << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ // set the open flag. TODO: also support listen-only mode
+ if (write(mFd.get(), slcanprotocol::kOpenCommand.c_str(),
+ slcanprotocol::kOpenCommand.length()) <= 0) {
+ LOG(ERROR) << "Failed to set open flag: " << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ // set line discipline to slcan
+ if (ioctl(mFd.get(), TIOCSETD, &slcanprotocol::kSlcanDiscipline) < 0) {
+ LOG(ERROR) << "Failed to set line discipline to slcan: " << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ // get the name of the device we created
+ struct ifreq ifrequest = {};
+ if (ioctl(mFd.get(), SIOCGIFNAME, ifrequest.ifr_name) < 0) {
+ LOG(ERROR) << "Failed to get the name of the created device: " << strerror(errno);
+ return ICanController::Result::UNKNOWN_ERROR;
+ }
+
+ // Update the CanBus object with the name that was assigned to it
+ mIfname = ifrequest.ifr_name;
+
+ return ICanController::Result::OK;
+}
+
+bool CanBusSlcan::postDown() {
+ // reset the line discipline to TTY mode
+ if (ioctl(mFd.get(), TIOCSETD, &slcanprotocol::kDefaultDiscipline) < 0) {
+ LOG(ERROR) << "Failed to reset line discipline!";
+ return false;
+ }
+
+ // issue the close command
+ if (write(mFd.get(), slcanprotocol::kCloseCommand.c_str(),
+ slcanprotocol::kCloseCommand.length()) <= 0) {
+ LOG(ERROR) << "Failed to close tty!";
+ return false;
+ }
+
+ // close our unique_fd
+ mFd.reset();
+
+ return true;
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace can
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/can/1.0/default/CanBusSlcan.h b/automotive/can/1.0/default/CanBusSlcan.h
new file mode 100644
index 0000000..2713da8
--- /dev/null
+++ b/automotive/can/1.0/default/CanBusSlcan.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <net/if.h>
+#include <termios.h>
+#include "CanBus.h"
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace can {
+namespace V1_0 {
+namespace implementation {
+
+struct CanBusSlcan : public CanBus {
+ CanBusSlcan(const std::string& uartName, uint32_t bitrate);
+
+ protected:
+ virtual ICanController::Result preUp() override;
+ virtual bool postDown() override;
+
+ private:
+ const std::string mUartName;
+ const uint32_t kBitrate;
+ base::unique_fd mFd;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace can
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/can/1.0/default/CanController.cpp b/automotive/can/1.0/default/CanController.cpp
index 3b63fe4..ffdc912 100644
--- a/automotive/can/1.0/default/CanController.cpp
+++ b/automotive/can/1.0/default/CanController.cpp
@@ -17,6 +17,7 @@
#include "CanController.h"
#include "CanBusNative.h"
+#include "CanBusSlcan.h"
#include "CanBusVirtual.h"
#include <android-base/logging.h>
@@ -34,10 +35,8 @@
using IfaceIdDisc = ICanController::BusConfiguration::InterfaceIdentifier::hidl_discriminator;
Return<void> CanController::getSupportedInterfaceTypes(getSupportedInterfaceTypes_cb _hidl_cb) {
- _hidl_cb({
- ICanController::InterfaceType::VIRTUAL,
- ICanController::InterfaceType::SOCKETCAN,
- });
+ _hidl_cb({ICanController::InterfaceType::VIRTUAL, ICanController::InterfaceType::SOCKETCAN,
+ ICanController::InterfaceType::SLCAN});
return {};
}
@@ -77,6 +76,12 @@
} else {
return ICanController::Result::BAD_ADDRESS;
}
+ } else if (config.iftype == ICanController::InterfaceType::SLCAN) {
+ if (config.interfaceId.getDiscriminator() == IfaceIdDisc::address) {
+ busService = new CanBusSlcan(config.interfaceId.address(), config.baudrate);
+ } else {
+ return ICanController::Result::BAD_ADDRESS;
+ }
} else {
return ICanController::Result::NOT_SUPPORTED;
}
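
For context, a client selects the new SLCAN branch by passing a BusConfiguration whose interfaceId carries the serial device path rather than a network interface name. A minimal sketch, assuming the ICanController::upInterface() entry point and hypothetical name/address values:

    using android::sp;
    using android::hardware::automotive::can::V1_0::ICanController;

    ICanController::Result bringUpSlcan(const sp<ICanController>& controller) {
        ICanController::BusConfiguration config = {};
        config.name = "slcan0";    // hypothetical service name for the new bus
        config.iftype = ICanController::InterfaceType::SLCAN;
        config.baudrate = 250000;  // must map to one of the supported SLCAN bitrates
        config.interfaceId.address("/dev/ttyUSB0");  // hypothetical serial device path
        return controller->upInterface(config);
    }
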
diff --git a/automotive/can/1.0/default/CanSocket.cpp b/automotive/can/1.0/default/CanSocket.cpp
index ecf4044..86e12d1 100644
--- a/automotive/can/1.0/default/CanSocket.cpp
+++ b/automotive/can/1.0/default/CanSocket.cpp
@@ -33,12 +33,10 @@
using namespace std::chrono_literals;
-/**
- * How frequently the read thread checks whether the interface was asked to be down.
+/* How frequently the read thread checks whether the interface was asked to be down.
*
* Note: This does *not* affect read timing or bandwidth, just CPU load vs time to
- * down the interface.
- */
+ * down the interface. */
static constexpr auto kReadPooling = 100ms;
std::unique_ptr<CanSocket> CanSocket::open(const std::string& ifname, ReadCallback rdcb,
@@ -105,8 +103,7 @@
while (!mStopReaderThread) {
/* The ideal would be to have a blocking read(3) call and interrupt it with shutdown(3).
- * This is unfortunately not supported for SocketCAN, so we need to rely on select(3).
- */
+ * This is unfortunately not supported for SocketCAN, so we need to rely on select(3). */
const auto sel = selectRead(mSocket, kReadPooling);
if (sel == 0) continue; // timeout
if (sel == -1) {
@@ -127,8 +124,7 @@
* Apart from the added complexity, it's possible the added calculations and system calls
* would add so much time to the processing pipeline so the precision of the reported time
* was buried under the subsystem latency. Let's just use a local time since boot here and
- * leave precise hardware timestamps for custom proprietary implementations (if needed).
- */
+ * leave precise hardware timestamps for custom proprietary implementations (if needed). */
const std::chrono::nanoseconds ts(elapsedRealtimeNano());
if (nbytes != CAN_MTU) {
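
The selectRead() helper used above is outside this hunk; conceptually it is a select(2) wrapper that waits for readability with a timeout and mirrors select's return convention (0 on timeout, -1 on error). A minimal sketch of such a wrapper, not necessarily the exact helper in this file:

    #include <android-base/unique_fd.h>
    #include <sys/select.h>
    #include <chrono>

    static int selectRead(const android::base::unique_fd& fd, std::chrono::milliseconds timeout) {
        fd_set readfds;
        FD_ZERO(&readfds);
        FD_SET(fd.get(), &readfds);

        struct timeval tv;
        tv.tv_sec = timeout.count() / 1000;
        tv.tv_usec = (timeout.count() % 1000) * 1000;

        // >0: descriptor is readable; 0: timed out; -1: error (errno is set).
        return select(fd.get() + 1, &readfds, nullptr, nullptr, &tv);
    }
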
diff --git a/automotive/can/1.0/default/CanSocket.h b/automotive/can/1.0/default/CanSocket.h
index 284e1ea..c98330b 100644
--- a/automotive/can/1.0/default/CanSocket.h
+++ b/automotive/can/1.0/default/CanSocket.h
@@ -31,9 +31,7 @@
namespace V1_0 {
namespace implementation {
-/**
- * Wrapper around SocketCAN socket.
- */
+/** Wrapper around SocketCAN socket. */
struct CanSocket {
using ReadCallback = std::function<void(const struct canfd_frame&, std::chrono::nanoseconds)>;
using ErrorCallback = std::function<void(int errnoVal)>;
diff --git a/automotive/can/1.0/default/CloseHandle.h b/automotive/can/1.0/default/CloseHandle.h
index 5191739..eade109 100644
--- a/automotive/can/1.0/default/CloseHandle.h
+++ b/automotive/can/1.0/default/CloseHandle.h
@@ -26,9 +26,7 @@
namespace V1_0 {
namespace implementation {
-/**
- * Generic ICloseHandle implementation ignoring double-close events.
- */
+/** Generic ICloseHandle implementation ignoring double-close events. */
struct CloseHandle : public ICloseHandle {
using Callback = std::function<void()>;
diff --git a/automotive/can/1.0/default/libnetdevice/NetlinkRequest.h b/automotive/can/1.0/default/libnetdevice/NetlinkRequest.h
index 21202e3..ba9b65b 100644
--- a/automotive/can/1.0/default/libnetdevice/NetlinkRequest.h
+++ b/automotive/can/1.0/default/libnetdevice/NetlinkRequest.h
@@ -27,9 +27,7 @@
typedef unsigned short rtattrtype_t; // as in rtnetlink.h
typedef __u16 nlmsgtype_t; // as in netlink.h
-/**
- * Implementation details, do not use outside NetlinkRequest template.
- */
+/** Implementation details, do not use outside NetlinkRequest template. */
namespace impl {
struct rtattr* addattr_l(struct nlmsghdr* n, size_t maxLen, rtattrtype_t type, const void* data,
@@ -59,7 +57,7 @@
mRequest.nlmsg.nlmsg_flags = flags;
}
- /** Returns pointer to raw netlink message header. */
+ /** \return pointer to raw netlink message header. */
struct nlmsghdr* header() {
return &mRequest.nlmsg;
}
@@ -89,9 +87,7 @@
if (ap == nullptr) mIsGood = false;
}
- /**
- * Guard class to frame nested attributes. See nest(int).
- */
+ /** Guard class to frame nested attributes. See nest(int). */
struct Nest {
Nest(NetlinkRequest& req, rtattrtype_t type) : mReq(req), mAttr(req.nestStart(type)) {}
~Nest() { mReq.nestEnd(mAttr); }
diff --git a/automotive/can/1.0/default/libnetdevice/NetlinkSocket.h b/automotive/can/1.0/default/libnetdevice/NetlinkSocket.h
index 81d6224..90e1f3f 100644
--- a/automotive/can/1.0/default/libnetdevice/NetlinkSocket.h
+++ b/automotive/can/1.0/default/libnetdevice/NetlinkSocket.h
@@ -38,8 +38,8 @@
/**
* Send Netlink message to Kernel.
*
- * @param msg Message to send, nlmsg_seq will be set to next sequence number
- * @return true, if succeeded
+ * \param msg Message to send, nlmsg_seq will be set to next sequence number
+ * \return true, if succeeded
*/
template <class T, unsigned int BUFSIZE>
bool send(NetlinkRequest<T, BUFSIZE>& req) {
@@ -50,7 +50,7 @@
/**
* Receive Netlink ACK message from Kernel.
*
- * @return true if received ACK message, false in case of error
+ * \return true if received ACK message, false in case of error
*/
bool receiveAck();
diff --git a/automotive/can/1.0/default/libnetdevice/include/libnetdevice/can.h b/automotive/can/1.0/default/libnetdevice/include/libnetdevice/can.h
index ec3f962..d75361e 100644
--- a/automotive/can/1.0/default/libnetdevice/include/libnetdevice/can.h
+++ b/automotive/can/1.0/default/libnetdevice/include/libnetdevice/can.h
@@ -27,13 +27,16 @@
/**
* Opens and binds SocketCAN socket.
*
- * @param ifname Interface to open a socket against
- * @return Socket's FD or -1 in case of failure
+ * \param ifname Interface to open a socket against
+ * \return Socket's FD or -1 in case of failure
*/
base::unique_fd socket(const std::string& ifname);
/**
* Sets CAN interface bitrate.
+ *
+ * \param ifname Interface for which the bitrate is to be set
+ * \return true on success, false on failure
*/
bool setBitrate(std::string ifname, uint32_t bitrate);
diff --git a/automotive/can/1.0/default/libnetdevice/include/libnetdevice/libnetdevice.h b/automotive/can/1.0/default/libnetdevice/include/libnetdevice/libnetdevice.h
index 33d5de5..e22eafb 100644
--- a/automotive/can/1.0/default/libnetdevice/include/libnetdevice/libnetdevice.h
+++ b/automotive/can/1.0/default/libnetdevice/include/libnetdevice/libnetdevice.h
@@ -25,49 +25,49 @@
/**
* Checks, if the network interface exists.
*
- * @param ifname Interface to check
- * @return true if it exists, false otherwise
+ * \param ifname Interface to check
+ * \return true if it exists, false otherwise
*/
bool exists(std::string ifname);
/**
* Checks if network interface is up.
*
- * @param ifname Interface to check
- * @return true/false if the check succeeded, nullopt otherwise
+ * \param ifname Interface to check
+ * \return true/false if the check succeeded, nullopt otherwise
*/
std::optional<bool> isUp(std::string ifname);
/**
* Brings network interface up.
*
- * @param ifname Interface to bring up
- * @return true in case of success, false otherwise
+ * \param ifname Interface to bring up
+ * \return true in case of success, false otherwise
*/
bool up(std::string ifname);
/**
* Brings network interface down.
*
- * @param ifname Interface to bring down
- * @return true in case of success, false otherwise
+ * \param ifname Interface to bring down
+ * \return true in case of success, false otherwise
*/
bool down(std::string ifname);
/**
* Adds virtual link.
*
- * @param dev the name of the new virtual device
- * @param type the type of the new device
- * @return true in case of success, false otherwise
+ * \param dev the name of the new virtual device
+ * \param type the type of the new device
+ * \return true in case of success, false otherwise
*/
bool add(std::string dev, std::string type);
/**
* Deletes virtual link.
*
- * @param dev the name of the device to remove
- * @return true in case of success, false otherwise
+ * \param dev the name of the device to remove
+ * \return true in case of success, false otherwise
*/
bool del(std::string dev);
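
Taken together, these helpers cover the typical lifecycle of a virtual link. A minimal sketch, assuming the helpers live in the android::netdevice namespace (the namespace declaration is outside this hunk):

    #include <libnetdevice/libnetdevice.h>
    #include <string>

    // Creates (if needed) and brings up a virtual CAN interface.
    bool setUpVirtualBus(const std::string& ifname) {
        using namespace android::netdevice;  // assumed namespace
        if (!exists(ifname) && !add(ifname, "vcan")) return false;
        return up(ifname);
    }

    // Counterpart teardown: bring the link down, then delete it.
    bool tearDownVirtualBus(const std::string& ifname) {
        using namespace android::netdevice;  // assumed namespace
        return down(ifname) && del(ifname);
    }
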
diff --git a/automotive/can/1.0/tools/canhaldump.cpp b/automotive/can/1.0/tools/canhaldump.cpp
index 5713d17..99fd14a 100644
--- a/automotive/can/1.0/tools/canhaldump.cpp
+++ b/automotive/can/1.0/tools/canhaldump.cpp
@@ -49,7 +49,7 @@
} else {
std::cout << " [" << message.payload.size() << "] ";
for (const auto byte : message.payload) {
- std::cout << " " << unsigned(byte);
+ std::cout << " " << std::setfill('0') << std::setw(2) << unsigned(byte);
}
}
std::cout << std::nouppercase << std::dec << std::endl;
diff --git a/automotive/can/1.0/vts/functional/VtsHalCanBusVirtualV1_0TargetTest.cpp b/automotive/can/1.0/vts/functional/VtsHalCanBusVirtualV1_0TargetTest.cpp
index ba29c29..225984d 100644
--- a/automotive/can/1.0/vts/functional/VtsHalCanBusVirtualV1_0TargetTest.cpp
+++ b/automotive/can/1.0/vts/functional/VtsHalCanBusVirtualV1_0TargetTest.cpp
@@ -91,8 +91,7 @@
EXPECT_EQ(ICanController::Result::OK, result);
/* Not using ICanBus::getService here, since it ignores interfaces not in the manifest
- * file -- this is a test, so we don't want to add dummy services to a device manifest.
- */
+ * file -- this is a test, so we don't want to add dummy services to a device manifest. */
auto manager = hidl::manager::V1_2::IServiceManager::getService();
auto service = manager->get(ICanBus::descriptor, config.name);
mBus = ICanBus::castFrom(service);
diff --git a/automotive/can/1.0/vts/functional/VtsHalCanControllerV1_0TargetTest.cpp b/automotive/can/1.0/vts/functional/VtsHalCanControllerV1_0TargetTest.cpp
index 70f9fe4..64e7a96 100644
--- a/automotive/can/1.0/vts/functional/VtsHalCanControllerV1_0TargetTest.cpp
+++ b/automotive/can/1.0/vts/functional/VtsHalCanControllerV1_0TargetTest.cpp
@@ -95,8 +95,7 @@
void CanControllerHalTest::assertRegistered(std::string srvname, bool expectRegistered) {
/* Not using ICanBus::tryGetService here, since it ignores interfaces not in the manifest
- * file -- this is a test, so we don't want to add dummy services to a device manifest.
- */
+ * file -- this is a test, so we don't want to add dummy services to a device manifest. */
auto manager = hidl::manager::V1_2::IServiceManager::getService();
auto busService = manager->get(ICanBus::descriptor, srvname);
ASSERT_EQ(expectRegistered, busService.withDefault(nullptr) != nullptr)
diff --git a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VehicleConnector.h b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VehicleConnector.h
new file mode 100644
index 0000000..56ecd67
--- /dev/null
+++ b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VehicleConnector.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef android_hardware_automotive_vehicle_V2_0_VehicleConnector_H_
+#define android_hardware_automotive_vehicle_V2_0_VehicleConnector_H_
+
+#include <vector>
+
+#include <android/hardware/automotive/vehicle/2.0/types.h>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+namespace V2_0 {
+
+/**
+ * This file defines the interface of client/server pair for HAL-vehicle
+ * communication. Vehicle HAL may use this interface to talk to the vehicle
+ * regardless of the underlying communication channels.
+ */
+
+/**
+ * Vehicle HAL talks to the vehicle through a client, instead of accessing
+ * the car bus directly, to give us more flexibility in the implementation.
+ * The Android OS does not need direct access to the vehicle, and the
+ * communication channel is also customizable.
+ *
+ * The client lives on the Android (HAL) side and talks to the vehicle.
+ */
+class IVehicleClient {
+ public:
+ IVehicleClient() = default;
+
+ IVehicleClient(const IVehicleClient&) = delete;
+
+ IVehicleClient& operator=(const IVehicleClient&) = delete;
+
+ IVehicleClient(IVehicleClient&&) = default;
+
+ virtual ~IVehicleClient() = default;
+
+ // Get configuration of all properties from server
+ virtual std::vector<VehiclePropConfig> getAllPropertyConfig() const = 0;
+
+ // Send the set property request to server
+ virtual StatusCode setProperty(const VehiclePropValue& value) = 0;
+
+ // Receive a new property value from server
+ virtual void onPropertyValue(const VehiclePropValue& value) = 0;
+};
+
+/**
+ * The server lives on the vehicle side and talks to the Android HAL.
+ */
+class IVehicleServer {
+ public:
+ IVehicleServer() = default;
+
+ IVehicleServer(const IVehicleServer&) = delete;
+
+ IVehicleServer& operator=(const IVehicleServer&) = delete;
+
+ IVehicleServer(IVehicleServer&&) = default;
+
+ virtual ~IVehicleServer() = default;
+
+ // Receive the get property configuration request from HAL.
+ // Return a list of all property config
+ virtual std::vector<VehiclePropConfig> onGetAllPropertyConfig() const = 0;
+
+ // Receive the set property request from HAL.
+ // Process the setting and return the status code
+ virtual StatusCode onSetProperty(const VehiclePropValue& value) = 0;
+
+ // Receive a new property value from car (via direct connection to the car bus or the emulator)
+ // and forward the value to HAL
+ virtual void onPropertyValueFromCar(const VehiclePropValue& value) = 0;
+};
+
+/**
+ * If Android has direct access to the vehicle, then the client and
+ * the server may act in passthrough mode to avoid extra IPC.
+ *
+ * A template is used here to split the logic of operating Android objects (VehicleClientType),
+ * talking to cars (VehicleServerType), and the communication between client and server
+ * (passthrough mode in this case), so that we can easily combine different parts without
+ * duplicating code (for example, in the Google VHAL, the server talks to the fake car in the
+ * same way regardless of whether it sits on top of a passthrough connector, VSOCK, or any
+ * other communication channel between client and server).
+ *
+ * The alternative would be to factor out the logic common to every operation on both the
+ * client and the server, but such common logic does not always exist, and ensuring that
+ * different non-template connectors keep calling the same method is hard, especially when
+ * the engineer maintaining the code may not be aware of it when making changes. A template
+ * is a clean and easy way to solve this problem in this case.
+ */
+template <typename VehicleClientType, typename VehicleServerType>
+class IPassThroughConnector : public VehicleClientType, public VehicleServerType {
+ static_assert(std::is_base_of_v<IVehicleClient, VehicleClientType>);
+ static_assert(std::is_base_of_v<IVehicleServer, VehicleServerType>);
+
+ public:
+ std::vector<VehiclePropConfig> getAllPropertyConfig() const override {
+ return this->onGetAllPropertyConfig();
+ }
+
+ StatusCode setProperty(const VehiclePropValue& value) override {
+ return this->onSetProperty(value);
+ }
+
+ void onPropertyValueFromCar(const VehiclePropValue& value) override {
+ return this->onPropertyValue(value);
+ }
+
+ // To be implemented:
+ // virtual std::vector<VehiclePropConfig> onGetAllPropertyConfig() const = 0;
+ // virtual void onPropertyValue(const VehiclePropValue& value) = 0;
+ // virtual StatusCode onSetProperty(const VehiclePropValue& value) = 0;
+};
+
+} // namespace V2_0
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // android_hardware_automotive_vehicle_V2_0_VehicleConnector_H_
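
To make the passthrough template concrete: a connector derives from IPassThroughConnector and supplies the three methods named in the "To be implemented" comment above; the template then wires each client call to its server counterpart with no IPC. A minimal sketch, where EmulatedVehicleConnector and its trivial bodies are hypothetical:

    using namespace android::hardware::automotive::vehicle::V2_0;

    class EmulatedVehicleConnector
        : public IPassThroughConnector<IVehicleClient, IVehicleServer> {
      public:
        std::vector<VehiclePropConfig> onGetAllPropertyConfig() const override {
            return {};  // hypothetical: would return the emulated car's configs
        }

        StatusCode onSetProperty(const VehiclePropValue& /*value*/) override {
            return StatusCode::OK;  // hypothetical: would forward to the fake car
        }

        void onPropertyValue(const VehiclePropValue& /*value*/) override {
            // hypothetical: would deliver the value to the HAL-side property store
        }
    };
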
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/SocketComm.cpp b/automotive/vehicle/2.0/default/impl/vhal_v2_0/SocketComm.cpp
index 9eb8894..068333c 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/SocketComm.cpp
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/SocketComm.cpp
@@ -92,7 +92,10 @@
}
ALOGI("%s: Listening for connections on port %d", __FUNCTION__, DEBUG_SOCKET);
- ::listen(mListenFd, 1);
+ if (::listen(mListenFd, 1) == -1) {
+ ALOGE("%s: Error on listening: errno: %d: %s", __FUNCTION__, errno, strerror(errno));
+ return false;
+ }
return true;
}
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 1535979..fb8d395 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -476,7 +476,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.vibrator</name>
- <version>1.0-4</version>
+ <version>1.0-3</version>
<interface>
<name>IVibrator</name>
<instance>default</instance>
diff --git a/configstore/1.1/default/seccomp_policy/configstore@1.1-arm64.policy b/configstore/1.1/default/seccomp_policy/configstore@1.1-arm64.policy
index 937fddd..a609620 100644
--- a/configstore/1.1/default/seccomp_policy/configstore@1.1-arm64.policy
+++ b/configstore/1.1/default/seccomp_policy/configstore@1.1-arm64.policy
@@ -45,6 +45,7 @@
getdents64: 1
clock_gettime: 1
getpid: 1
+gettid: 1
# used during process crash by crash_dump to dump process info
rt_sigprocmask: 1
diff --git a/current.txt b/current.txt
index a114bf4..26de8ff 100644
--- a/current.txt
+++ b/current.txt
@@ -594,16 +594,20 @@
ce8dbe76eb9ee94b46ef98f725be992e760a5751073d4f4912484026541371f3 android.hardware.health@2.1::IHealth
26f04510a0b57aba5167c5c0a7c2f077c2acbb98b81902a072517829fd9fd67f android.hardware.health@2.1::IHealthInfoCallback
db47f4ceceb1f06c656f39caa70c557b0f8471ef59fd58611bea667ffca20101 android.hardware.health@2.1::types
+c228aaa27f66c48e147159a4f4996c5273191fece1b08de31bd171c61334855e android.hardware.keymaster@4.1::IKeymasterDevice
+adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
+7a04ea5595ed418ca3e91c28b8bd7353dd988be9be7b0c8c9e64fb4b77bd4523 android.hardware.keymaster@4.1::types
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
-fd5a2b723b75acbdd9f31bd07e0f83293c52f99f8d9b87bf58eeb6018f665fde android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
+4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
+94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
+c511b1427b1c3f76af90967bbddaaf250db983a8d3abb9ff189fb5a807cf3d4d android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
619fc9839ec6e369cfa9b28e3e9412e6885720ff8f9b5750c1b6ffb905120391 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
c9273429fcf98d797d3bb07fdba6f1be95bf960f9255cde169fd1ca4db85f856 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
9b0a3ab6f4f74b971ed094426d8a443e29b512ff03e1ab50c07156396cdb2483 android.hardware.wifi.supplicant@1.3::types
-41c602462ccd1b19cfd645994be4de4c07fc197ff58a54e84476b31908e61e21 android.hardware.radio@1.5::types
-a8691c71747c3f14f7a043598e856425077f755e55990507a9132ad62f8ab3f7 android.hardware.radio@1.5::IRadio
+ba759fab7dd4cd621b492ef8f44312d4a716af786786f404d1477cc8cea6f392 android.hardware.radio@1.5::types
+c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication
-15daf260aaf6781b911450bc94e1a164901f9c0fe0bda68f8434f0a903f66e05 android.hardware.radio@1.5::IRadioResponse
+260ce05806d753d728f844d405e832179ed7d9b65986ec18fef3d21cf7285587 android.hardware.radio@1.5::IRadioResponse
diff --git a/health/storage/1.0/vts/functional/Android.bp b/health/storage/1.0/vts/functional/Android.bp
index 87502f8..a30cdde 100644
--- a/health/storage/1.0/vts/functional/Android.bp
+++ b/health/storage/1.0/vts/functional/Android.bp
@@ -22,6 +22,6 @@
shared_libs: [
"libhidlbase",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/health/storage/1.0/vts/functional/VtsHalHealthStorageV1_0TargetTest.cpp b/health/storage/1.0/vts/functional/VtsHalHealthStorageV1_0TargetTest.cpp
index 2365124..eaa44ec 100644
--- a/health/storage/1.0/vts/functional/VtsHalHealthStorageV1_0TargetTest.cpp
+++ b/health/storage/1.0/vts/functional/VtsHalHealthStorageV1_0TargetTest.cpp
@@ -14,11 +14,12 @@
* limitations under the License.
*/
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <android-base/logging.h>
#include <android/hardware/health/storage/1.0/IStorage.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
#include <hidl/HidlTransportSupport.h>
+#include <hidl/ServiceManagement.h>
#include <unistd.h>
#include <thread>
@@ -101,25 +102,10 @@
Result mResult{Result::UNKNOWN_ERROR};
};
-/** Test environment for Health Storage HIDL HAL. */
-class HealthStorageHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- /** get the test environment singleton */
- static HealthStorageHidlEnvironment* Instance() {
- static HealthStorageHidlEnvironment* instance = new HealthStorageHidlEnvironment();
- return instance;
- }
- virtual void registerTestServices() override { registerTestService<IStorage>(); }
-
- private:
- HealthStorageHidlEnvironment() {}
-};
-
-class HealthStorageHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class HealthStorageHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- fs = ::testing::VtsHalHidlTargetTestBase::getService<IStorage>(
- HealthStorageHidlEnvironment::Instance()->getServiceName<IStorage>());
+ fs = IStorage::getService(GetParam());
ASSERT_NE(fs, nullptr);
LOG(INFO) << "Service is remote " << fs->isRemote();
@@ -153,7 +139,7 @@
/**
* Ensure garbage collection works on null callback.
*/
-TEST_F(HealthStorageHidlTest, GcNullCallback) {
+TEST_P(HealthStorageHidlTest, GcNullCallback) {
auto ret = fs->garbageCollect(kDevGcTimeoutSec, nullptr);
ASSERT_OK(ret);
@@ -167,28 +153,20 @@
/**
* Ensure garbage collection works on non-null callback.
*/
-TEST_F(HealthStorageHidlTest, GcNonNullCallback) {
+TEST_P(HealthStorageHidlTest, GcNonNullCallback) {
sp<GcCallback> cb = new GcCallback();
auto ret = fs->garbageCollect(kDevGcTimeoutSec, cb);
ASSERT_OK(ret);
cb->waitForResult(kDevGcTimeout + kDevGcTolerance + kRpcTime, Result::SUCCESS);
}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, HealthStorageHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IStorage::descriptor)),
+ android::hardware::PrintInstanceNameToString);
+
} // namespace V1_0
} // namespace storage
} // namespace health
} // namespace hardware
} // namespace android
-
-int main(int argc, char** argv) {
- using ::android::hardware::configureRpcThreadpool;
- using ::android::hardware::health::storage::V1_0::HealthStorageHidlEnvironment;
-
- configureRpcThreadpool(1, false /* callerWillJoin*/);
- ::testing::AddGlobalTestEnvironment(HealthStorageHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- HealthStorageHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
diff --git a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
index c5acf8c..0e12283 100644
--- a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -1540,89 +1540,6 @@
}
}
-/*
- * SigningOperationsTest.HmacRfc4231TestCase6
- *
- * Validates against the test vectors from RFC 4231 test case 6.
- */
-TEST_F(SigningOperationsTest, HmacRfc4231TestCase6) {
- string key(131, 0xaa);
- string message = "Test Using Larger Than Block-Size Key - Hash Key First";
-
- uint8_t sha_224_expected[] = {
- 0x95, 0xe9, 0xa0, 0xdb, 0x96, 0x20, 0x95, 0xad, 0xae, 0xbe, 0x9b, 0x2d, 0x6f, 0x0d,
- 0xbc, 0xe2, 0xd4, 0x99, 0xf1, 0x12, 0xf2, 0xd2, 0xb7, 0x27, 0x3f, 0xa6, 0x87, 0x0e,
- };
- uint8_t sha_256_expected[] = {
- 0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26,
- 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28,
- 0xc5, 0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54,
- };
- uint8_t sha_384_expected[] = {
- 0x4e, 0xce, 0x08, 0x44, 0x85, 0x81, 0x3e, 0x90, 0x88, 0xd2, 0xc6, 0x3a,
- 0x04, 0x1b, 0xc5, 0xb4, 0x4f, 0x9e, 0xf1, 0x01, 0x2a, 0x2b, 0x58, 0x8f,
- 0x3c, 0xd1, 0x1f, 0x05, 0x03, 0x3a, 0xc4, 0xc6, 0x0c, 0x2e, 0xf6, 0xab,
- 0x40, 0x30, 0xfe, 0x82, 0x96, 0x24, 0x8d, 0xf1, 0x63, 0xf4, 0x49, 0x52,
- };
- uint8_t sha_512_expected[] = {
- 0x80, 0xb2, 0x42, 0x63, 0xc7, 0xc1, 0xa3, 0xeb, 0xb7, 0x14, 0x93, 0xc1, 0xdd,
- 0x7b, 0xe8, 0xb4, 0x9b, 0x46, 0xd1, 0xf4, 0x1b, 0x4a, 0xee, 0xc1, 0x12, 0x1b,
- 0x01, 0x37, 0x83, 0xf8, 0xf3, 0x52, 0x6b, 0x56, 0xd0, 0x37, 0xe0, 0x5f, 0x25,
- 0x98, 0xbd, 0x0f, 0xd2, 0x21, 0x5d, 0x6a, 0x1e, 0x52, 0x95, 0xe6, 0x4f, 0x73,
- 0xf6, 0x3f, 0x0a, 0xec, 0x8b, 0x91, 0x5a, 0x98, 0x5d, 0x78, 0x65, 0x98,
- };
-
- CheckHmacTestVector(key, message, Digest::SHA_2_256, make_string(sha_256_expected));
- if (SecLevel() != SecurityLevel::STRONGBOX) {
- CheckHmacTestVector(key, message, Digest::SHA_2_224, make_string(sha_224_expected));
- CheckHmacTestVector(key, message, Digest::SHA_2_384, make_string(sha_384_expected));
- CheckHmacTestVector(key, message, Digest::SHA_2_512, make_string(sha_512_expected));
- }
-}
-
-/*
- * SigningOperationsTest.HmacRfc4231TestCase7
- *
- * Validates against the test vectors from RFC 4231 test case 7.
- */
-TEST_F(SigningOperationsTest, HmacRfc4231TestCase7) {
- string key(131, 0xaa);
- string message =
- "This is a test using a larger than block-size key and a larger than "
- "block-size data. The key needs to be hashed before being used by the HMAC "
- "algorithm.";
-
- uint8_t sha_224_expected[] = {
- 0x3a, 0x85, 0x41, 0x66, 0xac, 0x5d, 0x9f, 0x02, 0x3f, 0x54, 0xd5, 0x17, 0xd0, 0xb3,
- 0x9d, 0xbd, 0x94, 0x67, 0x70, 0xdb, 0x9c, 0x2b, 0x95, 0xc9, 0xf6, 0xf5, 0x65, 0xd1,
- };
- uint8_t sha_256_expected[] = {
- 0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f,
- 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07,
- 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2,
- };
- uint8_t sha_384_expected[] = {
- 0x66, 0x17, 0x17, 0x8e, 0x94, 0x1f, 0x02, 0x0d, 0x35, 0x1e, 0x2f, 0x25,
- 0x4e, 0x8f, 0xd3, 0x2c, 0x60, 0x24, 0x20, 0xfe, 0xb0, 0xb8, 0xfb, 0x9a,
- 0xdc, 0xce, 0xbb, 0x82, 0x46, 0x1e, 0x99, 0xc5, 0xa6, 0x78, 0xcc, 0x31,
- 0xe7, 0x99, 0x17, 0x6d, 0x38, 0x60, 0xe6, 0x11, 0x0c, 0x46, 0x52, 0x3e,
- };
- uint8_t sha_512_expected[] = {
- 0xe3, 0x7b, 0x6a, 0x77, 0x5d, 0xc8, 0x7d, 0xba, 0xa4, 0xdf, 0xa9, 0xf9, 0x6e,
- 0x5e, 0x3f, 0xfd, 0xde, 0xbd, 0x71, 0xf8, 0x86, 0x72, 0x89, 0x86, 0x5d, 0xf5,
- 0xa3, 0x2d, 0x20, 0xcd, 0xc9, 0x44, 0xb6, 0x02, 0x2c, 0xac, 0x3c, 0x49, 0x82,
- 0xb1, 0x0d, 0x5e, 0xeb, 0x55, 0xc3, 0xe4, 0xde, 0x15, 0x13, 0x46, 0x76, 0xfb,
- 0x6d, 0xe0, 0x44, 0x60, 0x65, 0xc9, 0x74, 0x40, 0xfa, 0x8c, 0x6a, 0x58,
- };
-
- CheckHmacTestVector(key, message, Digest::SHA_2_256, make_string(sha_256_expected));
- if (SecLevel() != SecurityLevel::STRONGBOX) {
- CheckHmacTestVector(key, message, Digest::SHA_2_224, make_string(sha_224_expected));
- CheckHmacTestVector(key, message, Digest::SHA_2_384, make_string(sha_384_expected));
- CheckHmacTestVector(key, message, Digest::SHA_2_512, make_string(sha_512_expected));
- }
-}
-
typedef KeymasterHidlTest VerificationOperationsTest;
/*
@@ -4421,25 +4338,29 @@
* to specify how many following bytes will be used to encode the length.
*/
TEST_F(AttestationTest, AttestationApplicationIDLengthProperlyEncoded) {
- auto creation_time = std::chrono::system_clock::now();
- ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
- .Authorization(TAG_NO_AUTH_REQUIRED)
- .EcdsaSigningKey(EcCurve::P_256)
- .Digest(Digest::SHA_2_256)));
+ std::vector<uint32_t> app_id_lengths{143, 258};
+ for (uint32_t length : app_id_lengths) {
+ auto creation_time = std::chrono::system_clock::now();
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .EcdsaSigningKey(EcCurve::P_256)
+ .Digest(Digest::SHA_2_256)));
- hidl_vec<hidl_vec<uint8_t>> cert_chain;
- const string app_id(143, 'a');
- ASSERT_EQ(ErrorCode::OK,
- AttestKey(AuthorizationSetBuilder()
- .Authorization(TAG_ATTESTATION_CHALLENGE, HidlBuf("challenge"))
- .Authorization(TAG_ATTESTATION_APPLICATION_ID, HidlBuf(app_id)),
- &cert_chain));
- EXPECT_GE(cert_chain.size(), 2U);
+ hidl_vec<hidl_vec<uint8_t>> cert_chain;
+ const string app_id(length, 'a');
+ ASSERT_EQ(ErrorCode::OK,
+ AttestKey(AuthorizationSetBuilder()
+ .Authorization(TAG_ATTESTATION_CHALLENGE, HidlBuf("challenge"))
+ .Authorization(TAG_ATTESTATION_APPLICATION_ID, HidlBuf(app_id)),
+ &cert_chain));
+ EXPECT_GE(cert_chain.size(), 2U);
- EXPECT_TRUE(verify_attestation_record("challenge", app_id, //
- key_characteristics_.softwareEnforced, //
- key_characteristics_.hardwareEnforced, //
- SecLevel(), cert_chain[0], creation_time));
+ EXPECT_TRUE(verify_attestation_record("challenge", app_id, //
+ key_characteristics_.softwareEnforced, //
+ key_characteristics_.hardwareEnforced, //
+ SecLevel(), cert_chain[0], creation_time));
+ CheckedDeleteKey();
+ }
}
/*
* AttestationTest.AesAttestation
@@ -4683,6 +4604,57 @@
}
}
+typedef KeymasterHidlTest TransportLimitTest;
+
+/*
+ * TransportLimitTest.LargeFinishInput
+ *
+ * Verifies that passing large input data to finish either succeeds, or fails with
+ * INVALID_INPUT_LENGTH when the data exceeds the transport limit.
+ */
+TEST_F(TransportLimitTest, LargeFinishInput) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .BlockMode(BlockMode::ECB)
+ .Padding(PaddingMode::NONE)));
+
+ for (int msg_size = 10 /*1KB*/; msg_size <= 17 /*128KB*/; msg_size++) {
+ auto cipher_params =
+ AuthorizationSetBuilder().BlockMode(BlockMode::ECB).Padding(PaddingMode::NONE);
+
+ AuthorizationSet out_params;
+ EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, cipher_params, &out_params));
+
+ string plain_message = std::string(1 << msg_size, 'x');
+ string encrypted_message;
+ auto rc = Finish(plain_message, &encrypted_message);
+
+ if (rc == ErrorCode::OK) {
+ EXPECT_EQ(plain_message.size(), encrypted_message.size())
+ << "Encrypt finish returned OK, but did not consume all of the given input";
+ } else {
+ EXPECT_EQ(ErrorCode::INVALID_INPUT_LENGTH, rc)
+ << "Encrypt finish failed in an unexpected way when given a large input";
+ continue;
+ }
+ cipher_params.push_back(out_params);
+
+ EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::DECRYPT, cipher_params));
+
+ string decrypted_message;
+ rc = Finish(encrypted_message, &decrypted_message);
+
+ if (rc == ErrorCode::OK) {
+ EXPECT_EQ(plain_message.size(), decrypted_message.size())
+ << "Decrypt finish returned OK, did not consume all of the given input";
+ } else {
+ EXPECT_EQ(ErrorCode::INVALID_INPUT_LENGTH, rc)
+ << "Encrypt finish failed in an unexpected way when given a large input";
+ }
+ }
+
+ CheckedDeleteKey();
+}
} // namespace test
} // namespace V4_0
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index aacb385..c1bf494 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -58,8 +58,20 @@
using V1_1::ExecutionPreference;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+namespace {
+
+enum class Executor { ASYNC, SYNC, BURST };
+
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+struct TestConfig {
+ Executor executor;
+ MeasureTiming measureTiming;
+ OutputType outputType;
+};
+
+} // namespace
+
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
@@ -194,31 +206,31 @@
return android::nn::ExecutionBurstController::create(preparedModel,
std::chrono::microseconds{0});
}
-enum class Executor { ASYNC, SYNC, BURST };
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- Executor executor, MeasureTiming measure, OutputType outputType) {
+ const TestConfig& testConfig) {
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
- if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT &&
+ !isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
Request request = createRequest(testModel);
- if (outputType == OutputType::INSUFFICIENT) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
- switch (executor) {
+ switch (testConfig.executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- Return<ErrorStatus> executionLaunchStatus =
- ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@@ -234,8 +246,8 @@
SCOPED_TRACE("synchronous");
// execute
- Return<ErrorStatus> executionReturnStatus =
- ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+ Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
@@ -258,14 +270,14 @@
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
- controller->compute(request, measure, keys);
+ controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
}
- if (outputType != OutputType::FULLY_SPECIFIED &&
+ if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
@@ -274,7 +286,7 @@
<< std::endl;
GTEST_SKIP();
}
- if (measure == MeasureTiming::NO) {
+ if (testConfig.measureTiming == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
@@ -283,7 +295,7 @@
}
}
- switch (outputType) {
+ switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be either
// either empty, or have the same number of elements as the number of outputs.
@@ -321,44 +333,29 @@
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
bool testDynamicOutputShape) {
+ std::initializer_list<OutputType> outputTypesList;
+ std::initializer_list<MeasureTiming> measureTimingList;
+ std::initializer_list<Executor> executorList;
+
if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
+ outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
} else {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
+ outputTypesList = {OutputType::FULLY_SPECIFIED};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ }
+
+ for (const OutputType outputType : outputTypesList) {
+ for (const MeasureTiming measureTiming : measureTimingList) {
+ for (const Executor executor : executorList) {
+ const TestConfig testConfig = {.executor = executor,
+ .measureTiming = measureTiming,
+ .outputType = outputType};
+ EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ }
+ }
}
}
diff --git a/neuralnetworks/1.3/Android.bp b/neuralnetworks/1.3/Android.bp
index 8e3e9f1..0b07a58 100644
--- a/neuralnetworks/1.3/Android.bp
+++ b/neuralnetworks/1.3/Android.bp
@@ -9,6 +9,7 @@
srcs: [
"types.hal",
"IDevice.hal",
+ "IPreparedModel.hal",
"IPreparedModelCallback.hal",
],
interfaces: [
diff --git a/neuralnetworks/1.3/IPreparedModel.hal b/neuralnetworks/1.3/IPreparedModel.hal
new file mode 100644
index 0000000..c04809f
--- /dev/null
+++ b/neuralnetworks/1.3/IPreparedModel.hal
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.3;
+
+import @1.0::ErrorStatus;
+import @1.0::Request;
+import @1.2::MeasureTiming;
+import @1.2::IExecutionCallback;
+import @1.2::IPreparedModel;
+
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
+interface IPreparedModel extends @1.2::IPreparedModel {
+ /**
+ * Launches an asynchronous execution on a prepared model.
+ *
+ * The execution is performed asynchronously with respect to the caller.
+ * execute_1_3 must verify the inputs to the function are correct. If there is
+ * an error, execute_1_3 must immediately invoke the callback with the
+ * appropriate ErrorStatus value, then return with the same ErrorStatus. If
+ * the inputs to the function are valid and there is no error, execute_1_3 must
+ * launch an asynchronous task to perform the execution in the background,
+ * and immediately return with ErrorStatus::NONE. If the asynchronous task
+ * fails to launch, execute_1_3 must immediately invoke the callback with
+ * ErrorStatus::GENERAL_FAILURE, then return with
+ * ErrorStatus::GENERAL_FAILURE.
+ *
+ * When the asynchronous task has finished its execution, it must
+ * immediately invoke the callback object provided as an input to the
+ * execute_1_3 function. This callback must be provided with the ErrorStatus of
+ * the execution.
+ *
+ * If the launch is successful, the caller must not change the content of
+ * any data object referenced by 'request' (described by the
+ * {@link @1.0::DataLocation} of a {@link @1.0::RequestArgument}) until the
+ * asynchronous task has invoked the callback object. The asynchronous task
+ * must not change the content of any of the data objects corresponding to
+ * 'request' inputs.
+ *
+ * If the prepared model was prepared from a model wherein all tensor
+ * operands have fully specified dimensions, and the inputs to the function
+ * are valid, then:
+ * - the execution should launch successfully (ErrorStatus::NONE): There
+ * must be no failure unless the device itself is in a bad state.
+ * - if at execution time every operation's input operands have legal
+ * values, the execution should complete successfully (ErrorStatus::NONE):
+ * There must be no failure unless the device itself is in a bad state.
+ *
+ * Any number of calls to the execute, execute_1_2, execute_1_3, and executeSynchronously
+ * functions, in any combination, may be made concurrently, even on the same
+ * IPreparedModel object.
+ *
+ * @param request The input and output information on which the prepared
+ * model is to be executed.
+ * @param measure Specifies whether or not to measure duration of the execution.
+ * The duration runs from the time the driver sees the call
+ * to the execute_1_3 function to the time the driver invokes
+ * the callback.
+ * @param callback A callback object used to return the error status of
+ * the execution. The callback object's notify function must
+ * be called exactly once, even if the execution was
+ * unsuccessful.
+ * @return status Error status of the call, must be:
+ * - NONE if task is successfully launched
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+ * not large enough to store the resultant values
+ * - INVALID_ARGUMENT if one of the input arguments is
+ * invalid
+ */
+ execute_1_3(Request request, MeasureTiming measure, IExecutionCallback callback)
+ generates (ErrorStatus status);
+};
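A minimal client-side sketch of the asynchronous contract above, assuming the generated HIDL C++ bindings (this only builds inside an Android tree; error handling is elided):

    #include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
    #include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
    #include <utils/StrongPointer.h>

    using namespace android::hardware::neuralnetworks;
    using android::hardware::hidl_vec;
    using android::hardware::Return;
    using android::hardware::Void;

    // The driver must invoke exactly one notify per launched execution.
    struct ExecutionCallback : public V1_2::IExecutionCallback {
        Return<void> notify(V1_0::ErrorStatus /*status*/) override { return Void(); }
        Return<void> notify_1_2(V1_0::ErrorStatus /*status*/,
                                const hidl_vec<V1_2::OutputShape>& /*outputShapes*/,
                                const V1_2::Timing& /*timing*/) override {
            return Void();
        }
    };

    V1_0::ErrorStatus launch(const android::sp<V1_3::IPreparedModel>& preparedModel,
                             const V1_0::Request& request) {
        android::sp<ExecutionCallback> callback = new ExecutionCallback();
        // A status other than NONE means the launch itself failed; on NONE, the
        // outcome of the execution arrives later through the callback.
        Return<V1_0::ErrorStatus> ret =
                preparedModel->execute_1_3(request, V1_2::MeasureTiming::NO, callback);
        return ret.isOk() ? static_cast<V1_0::ErrorStatus>(ret)
                          : V1_0::ErrorStatus::GENERAL_FAILURE;
    }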
diff --git a/neuralnetworks/1.3/IPreparedModelCallback.hal b/neuralnetworks/1.3/IPreparedModelCallback.hal
index 7cc5ae0..ff295a2 100644
--- a/neuralnetworks/1.3/IPreparedModelCallback.hal
+++ b/neuralnetworks/1.3/IPreparedModelCallback.hal
@@ -18,7 +18,7 @@
import @1.0::ErrorStatus;
import @1.2::IPreparedModelCallback;
-import @1.2::IPreparedModel;
+import IPreparedModel;
/**
* IPreparedModelCallback must be used to return a prepared model produced by an
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 86ab287..7df14b1 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -73,6 +73,4500 @@
BASE_MAX = 0xFFFF,
};
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+ /**
+ * Adds two tensors, element-wise.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the sum of both input tensors, optionally
+ * modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its
+ * way forward.
+ *
+ * Example:
+ *
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The sum, a tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ ADD = @1.2::OperationType:ADD,
+
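The compatible-dimensions rule above is ordinary trailing-aligned broadcasting. A sketch of the output-shape computation (broadcastShape is a hypothetical helper, not part of the HAL; it also honors the zero-dimension rule):

    #include <algorithm>
    #include <stdexcept>
    #include <vector>

    // Walks from the trailing dimensions forward; a dimension of 1 stretches
    // to match the other operand (including to 0).
    std::vector<int> broadcastShape(std::vector<int> a, std::vector<int> b) {
        if (a.size() < b.size()) std::swap(a, b);
        std::vector<int> out = a;
        for (std::size_t i = 0; i < b.size(); ++i) {
            int& x = out[out.size() - 1 - i];
            const int y = b[b.size() - 1 - i];
            if (x == y || y == 1) continue;   // x already correct
            if (x == 1) { x = y; continue; }  // stretch x to match y
            throw std::invalid_argument("incompatible dimensions");
        }
        return out;
    }
    // broadcastShape({4, 1, 2}, {5, 4, 3, 1}) == {5, 4, 3, 2}, matching the example above.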
+ /**
+ * Performs a 2-D average pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * sum_{di, dj}(
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * ) / sum(1)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ AVERAGE_POOL_2D = @1.2::OperationType:AVERAGE_POOL_2D,
+
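For explicit padding, the output spatial size of the pooling and convolution operations in this enum follows the usual formula; a sketch (outputDim is a hypothetical helper, applied once per spatial axis):

    // Integer division truncates, counting the filter positions that fit.
    int outputDim(int inputDim, int padBefore, int padAfter, int filterDim, int stride) {
        return (inputDim - filterDim + padBefore + padAfter) / stride + 1;
    }
    // e.g. width 224, filter 3, stride 2, padding 0/1: (224 - 3 + 0 + 1) / 2 + 1 == 112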
+ /**
+ * Concatenates the input tensors along the given dimension.
+ *
+ * The input tensors must have identical {@link OperandType} and the same
+ * dimensions except the dimension along the concatenation axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the input section)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0 ~ n-1: The list of n input tensors, of shape
+ * [D0, D1, ..., Daxis(i), ..., Dm].
+ * Before HAL version 1.2, all input tensors of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * must have the same scale and zeroPoint as the output tensor.
+ * Since HAL version 1.2, zero-sized tensors are supported.
+ * * n: An {@link OperandType::INT32} scalar, specifying the
+ * concatenation axis.
+ *
+ * Outputs:
+ * * 0: The output, a tensor of the same {@link OperandType} as the input
+ * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint values can be different from
+ * input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+ */
+ CONCATENATION = @1.2::OperationType:CONCATENATION,
+
+ /**
+ * Performs a 2-D convolution operation.
+ *
+ * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
+ * batch of images, applying the filter to each window of each image of the
+ * appropriate size.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * sum_{di, dj, k} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[channel, di, dj, k]
+ * ) + bias[channel]
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Available since HAL version 1.2:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 12 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 11 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 9 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 8 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied: output_scale > input_scale * filter_scale
+ */
+ CONV_2D = @1.2::OperationType:CONV_2D,
+
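For the per-channel quantized variant above, each bias value carries its own effective scale, bias_scale[i] = input_scale * filter_scale[i]; a sketch (biasScales is a hypothetical helper):

    #include <vector>

    // One effective bias scale per output channel; the bias operand itself is
    // declared with scale 0.0 and these per-channel scales are implied.
    std::vector<float> biasScales(float inputScale, const std::vector<float>& filterScales) {
        std::vector<float> scales;
        scales.reserve(filterScales.size());
        for (const float filterScale : filterScales) {
            scales.push_back(inputScale * filterScale);
        }
        return scales;
    }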
+ /**
+ * Performs a depthwise 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [1, filter_height, filter_width, depth_out]
+ * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
+ * applies a different filter to each input channel (expanding from 1
+ * channel to channel_multiplier channels for each), then concatenates the
+ * results together.
+ *
+ * The output has depth_out = depth_in * depth_multiplier channels.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, k * channel_multiplier + q] =
+ * sum_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[1, di, dj, k * channel_multiplier + q]
+ * ) + bias[k * channel_multiplier + q]
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Available since HAL version 1.2:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 3.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
+ * multiplier.
+ * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 13 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 12 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
+ * multiplier.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 10 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 9 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
+ */
+ DEPTHWISE_CONV_2D = @1.2::OperationType:DEPTHWISE_CONV_2D,
+
+ /**
+ * Rearranges data from depth into blocks of spatial data.
+ *
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the depth dimension are moved in spatial blocks to the height
+ * and width dimensions. The value block_size indicates the input block size
+ * and how the data is moved.
+ *
+ * Chunks of data of size block_size * block_size from depth are rearranged
+ * into non-overlapping blocks of size block_size x block_size.
+ *
+ * The width of the output tensor is input_width * block_size, whereas the
+ * height is input_height * block_size. The depth of the input tensor must
+ * be divisible by block_size * block_size.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size * block_size must be a divisor
+ * of the input depth.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batch, height*block_size,
+ * width*block_size, depth/(block_size*block_size)].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ DEPTH_TO_SPACE = @1.2::OperationType:DEPTH_TO_SPACE,
+
+ /**
+ * Dequantizes the input tensor.
+ *
+ * The formula is:
+ *
+ * output = (input - zeroPoint) * scale.
+ *
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
+ *
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}.
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ */
+ DEQUANTIZE = @1.2::OperationType:DEQUANTIZE,
+
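The affine mapping above, applied per element; a sketch assuming a TENSOR_QUANT8_ASYMM input and a TENSOR_FLOAT32 output (dequantize is a hypothetical helper):

    #include <cstdint>
    #include <vector>

    std::vector<float> dequantize(const std::vector<uint8_t>& input,
                                  float scale, int32_t zeroPoint) {
        std::vector<float> output;
        output.reserve(input.size());
        for (const uint8_t v : input) {
            // output = (input - zeroPoint) * scale
            output.push_back(static_cast<float>(static_cast<int32_t>(v) - zeroPoint) * scale);
        }
        return output;
    }
    // With scale 0.5 and zeroPoint 128, the quantized value 130 maps to 1.0f.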
+ /**
+ * Looks up sub-tensors in the input tensor.
+ *
+ * This operator takes for input a tensor of values (Values) and
+ * a one-dimensional tensor of selection indices (Lookups).
+ * The output tensor is the concatenation of sub-tensors of Values as
+ * selected by Lookups.
+ *
+ * Think of Values as being sliced along its first dimension:
+ * The entries in Lookups select which slices are concatenated together
+ * to create the output tensor.
+ *
+ * For example, if Values has shape of [40, 200, 300] and
+ * Lookups has shape of [3], all three values found in Lookups are
+ * expected to be between 0 and 39. The resulting tensor must
+ * have shape of [3, 200, 300].
+ *
+ * If a value in Lookups is out of bounds, the operation must fail
+ * and an error must be reported.
+ *
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
+ * The values are indices into the first dimension of Values.
+ * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+ * extracted.
+ *
+ * Output:
+ * * 0: An n-D tensor with the same rank and shape as the Values
+ * tensor, except for the first dimension which has the same size
+ * as Lookups' only dimension.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input1.
+ */
+ EMBEDDING_LOOKUP = @1.2::OperationType:EMBEDDING_LOOKUP,
+
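A reference sketch of the lookup over flattened row-major storage (embeddingLookup is a hypothetical helper; an out-of-bounds index fails, as required above):

    #include <stdexcept>
    #include <vector>

    // values holds [rows, rowSize] row-major; each lookup copies one row.
    std::vector<float> embeddingLookup(const std::vector<int>& lookups,
                                       const std::vector<float>& values,
                                       int rows, int rowSize) {
        std::vector<float> out;
        out.reserve(lookups.size() * rowSize);
        for (const int index : lookups) {
            if (index < 0 || index >= rows) {
                throw std::out_of_range("lookup index out of bounds");
            }
            out.insert(out.end(), values.begin() + index * rowSize,
                       values.begin() + (index + 1) * rowSize);
        }
        return out;
    }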
+ /**
+ * Computes element-wise floor() on the input tensor.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor, of the same {@link OperandType} and dimensions as
+ * the input tensor.
+ */
+ FLOOR = @1.2::OperationType:FLOOR,
+
+ /**
+ * Denotes a fully (densely) connected layer, which connects all elements
+ * in the input tensor with each element in the output tensor.
+ *
+ * This layer implements the operation:
+ *
+ * outputs = activation(inputs * weights’ + bias)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor of at least rank 2, specifying the input. If rank is
+ * greater than 2, then it gets flattened to a 2-D Tensor. The
+ * (flattened) 2-D Tensor is reshaped (if necessary) to
+ * [batch_size, input_size], where "input_size" corresponds to the
+ * number of inputs to the layer, matching the second dimension of
+ * weights, and "batch_size" is calculated by dividing the number of
+ * elements by "input_size".
+ * Since HAL version 1.2, zero batch_size is supported for this tensor.
+ * * 1: A 2-D tensor, specifying the weights, of shape
+ * [num_units, input_size], where "num_units" corresponds to the number
+ * of output nodes.
+ * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
+ * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
+ */
+ FULLY_CONNECTED = @1.2::OperationType:FULLY_CONNECTED,
+
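A reference sketch of the computation above for TENSOR_FLOAT32 with a fused RELU (fullyConnectedRelu is a hypothetical helper; input is [batch_size, input_size] row-major, weights is [num_units, input_size], bias is [num_units]):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<float> fullyConnectedRelu(const std::vector<float>& input,
                                          const std::vector<float>& weights,
                                          const std::vector<float>& bias,
                                          int batchSize, int inputSize, int numUnits) {
        std::vector<float> output(static_cast<std::size_t>(batchSize) * numUnits);
        for (int b = 0; b < batchSize; ++b) {
            for (int u = 0; u < numUnits; ++u) {
                // outputs = activation(inputs * weights' + bias)
                float acc = bias[u];
                for (int i = 0; i < inputSize; ++i) {
                    acc += input[b * inputSize + i] * weights[u * inputSize + i];
                }
                output[b * numUnits + u] = std::max(acc, 0.0f);  // fused RELU
            }
        }
        return output;
    }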
+ /**
+ * Looks up sub-tensors in the input tensor using a key-value map.
+ *
+ * This operator takes for input a tensor of values (Values),
+ * a one-dimensional tensor of selection values (Lookups) and
+ * a one-dimensional tensor that maps these values to Values
+ * indexes. The output tensor is the concatenation of sub-tensors of
+ * Values as selected by Lookups via Keys.
+ *
+ * Think of Values as being sliced along its outer-most dimension.
+ * The output is a concatenation of selected slices, with one slice
+ * for each entry of Lookups. The slice selected is the one at the
+ * same index as the Maps entry that matches the value in Lookups.
+ *
+ * For a hit, the corresponding sub-tensor of Values is included
+ * in the Output tensor. For a miss, the corresponding sub-tensor in
+ * Output must have zero values.
+ *
+ * For example, if Values has shape of [40, 200, 300],
+ * Keys should have a shape of [40]. If Lookups tensor has shape
+ * of [3], three slices are being concatenated, so the resulting tensor
+ * must have the shape of [3, 200, 300]. If the first entry in Lookups
+ * has the value 123456, that value must be located in Keys tensor.
+ * If the sixth entry of Keys contains 123456, the sixth slice of Values
+ * must be selected. If no entry in Keys has 123456, a slice of zeroes
+ * must be concatenated.
+ *
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
+ * shape [ k ].
+ * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
+ * [ n ]; Keys and Values pair represent a map, i.e., the ith element
+ * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
+ * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
+ * ascending order.
+ * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
+ * must be n.
+ *
+ * Outputs:
+ * * 0: Output. A tensor with shape [ k …].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input2.
+ * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
+ * hits (True) or not (False).
+ * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
+ * and scale 1.0f.
+ * A non-zero byte represents True, a hit. A zero indicates otherwise.
+ */
+ HASHTABLE_LOOKUP = @1.2::OperationType:HASHTABLE_LOOKUP,
+
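Because the Keys tensor must be sorted in ascending order, each lookup can be resolved with a binary search; a sketch with zero-filled misses and the Hits output described above (hashtableLookup is a hypothetical helper):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // values holds [n, rowSize] row-major, with keys[i] naming values row i.
    void hashtableLookup(const std::vector<int32_t>& lookups,
                         const std::vector<int32_t>& keys,  // sorted ascending
                         const std::vector<float>& values,
                         int rowSize,
                         std::vector<float>* output,
                         std::vector<uint8_t>* hits) {
        output->assign(lookups.size() * rowSize, 0.0f);  // misses stay zero-filled
        hits->assign(lookups.size(), 0);
        for (std::size_t i = 0; i < lookups.size(); ++i) {
            const auto it = std::lower_bound(keys.begin(), keys.end(), lookups[i]);
            if (it == keys.end() || *it != lookups[i]) continue;  // miss
            const std::size_t row = static_cast<std::size_t>(it - keys.begin());
            std::copy(values.begin() + row * rowSize,
                      values.begin() + (row + 1) * rowSize,
                      output->begin() + i * rowSize);
            (*hits)[i] = 1;  // a non-zero byte represents a hit
        }
    }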
+ /**
+ * Applies L2 normalization along the depth dimension.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[batch, row, col, channel] =
+ * input[batch, row, col, channel] /
+ * sqrt(sum_{c} pow(input[batch, row, col, c], 2))
+ *
+ * For input tensor with rank less than 4, independently normalizes each
+ * 1-D slice along dimension dim.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension normalization would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ */
+ L2_NORMALIZATION = @1.2::OperationType:L2_NORMALIZATION,
+
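A sketch of the computation above along the last (depth) axis, treating the tensor as contiguous depth-length vectors (l2NormalizeDepth is a hypothetical helper; it assumes every vector has at least one nonzero element, since the formula does not define the all-zero case):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    void l2NormalizeDepth(std::vector<float>& data, int depth) {
        for (std::size_t start = 0; start + depth <= data.size(); start += depth) {
            float sumSq = 0.0f;
            for (int c = 0; c < depth; ++c) {
                sumSq += data[start + c] * data[start + c];
            }
            const float inv = 1.0f / std::sqrt(sumSq);
            for (int c = 0; c < depth; ++c) {
                data[start + c] *= inv;  // input / sqrt(sum of squares over depth)
            }
        }
    }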
+ /**
+ * Performs a 2-D L2 pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, c] =
+ * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
+ * sum(1))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ */
+ L2_POOL_2D = @1.2::OperationType:L2_POOL_2D,
+
+ /**
+ * Applies Local Response Normalization along the depth dimension.
+ *
+ * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
+ * last dimension), and each vector is normalized independently. Within a
+ * given vector, each component is divided by the weighted, squared sum of
+ * inputs within depth_radius.
+ *
+ * The output is calculated using this formula:
+ *
+ * sqr_sum[a, b, c, d] = sum(
+ * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
+ * output = input / pow((bias + alpha * sqr_sum), beta)
+ *
+ * For input tensor with rank less than 4, independently normalizes each
+ * 1-D slice along specified dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
+ * the normalization window.
+ * * 2: A scalar, specifying the bias, must not be zero.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 3: A scalar, specifying the scale factor, alpha.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * alpha value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * alpha value must be of {@link OperandType::FLOAT32}.
+ * * 4: A scalar, specifying the exponent, beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 5: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension normalization would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOCAL_RESPONSE_NORMALIZATION = @1.2::OperationType:LOCAL_RESPONSE_NORMALIZATION,
+
+ /**
+ * Computes sigmoid activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = 1 / (1 + exp(-input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ */
+ LOGISTIC = @1.2::OperationType:LOGISTIC,
+
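A sketch of the sigmoid and the fixed output quantization required above for TENSOR_QUANT8_ASYMM (scale 1.f / 256, zeroPoint 0; logisticQuant8 is a hypothetical helper):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    float logistic(float x) { return 1.0f / (1.0f + std::exp(-x)); }

    uint8_t logisticQuant8(float x) {
        // quantized = real / scale + zeroPoint = sigmoid(x) * 256 + 0
        const int q = static_cast<int>(std::lround(logistic(x) * 256.0f));
        return static_cast<uint8_t>(std::min(q, 255));  // sigmoid(x) < 1 may still round to 256
    }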
+ /**
+ * Projects an input to a bit vector via locality sensitive hashing.
+ *
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported input tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: Hash functions. Dim.size == 2, DataType: Float.
+ * Tensor[0].Dim[0]: Number of hash functions.
+ * Tensor[0].Dim[1]: Number of projected output bits generated by each
+ * hash function.
+ * If the projection type is Sparse:
+ * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
+ *
+ * * 1: Input. Dim.size >= 1, no restriction on DataType.
+ * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+ * If not set, each input element is considered to have the same weight
+ * of 1.0.
+ * Tensor[1].Dim[0] == Tensor[2].Dim[0]
+ * * 3: Type:
+ * Sparse:
+ * Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
+ * Computed bit vector is considered to be sparse.
+ * Each output element is an int32 made up of multiple bits
+ * computed from hash functions.
+ *
+ * NOTE: To avoid collisions across hash functions, an offset value
+ * of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
+ * where k is the index of the hash function.
+ *
+ * Value LSHProjectionType_SPARSE_DEPRECATED(=1).
+ * Legacy behavior that does not include the offset value.
+ *
+ * Dense:
+ * Value LSHProjectionType_DENSE(=2).
+ * Computed bit vector is considered to be dense. Each output
+ * element represents a bit and can take the value of either
+ * 0 or 1.
+ *
+ * Outputs:
+ * * 0: If the projection type is Sparse:
+ * Output.Dim == { Tensor[0].Dim[0] }
+ * A tensor of int32 that represents hash signatures.
+ *
+ * If the projection type is Dense:
+ * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+ * A flattened tensor that represents projected bit vectors.
+ * The offset value for sparse projections was added in HAL version 1.2.
+ */
+ LSH_PROJECTION = @1.2::OperationType:LSH_PROJECTION,
+
+ /**
+ * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
+ *
+ * The LSTM operation is described by the following equations.
+ *
+ * \f{eqnarray*}{
+ * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
+ * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
+ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
+ * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
+ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
+ * & & \\
+ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
+ * & if\ there\ is\ a\ projection; \\
+ * h_t =& & \\
+ * & o_t \odot g(C_t) & otherwise. \\
+ * \f}
+ * Where:
+ * * \f$x_t\f$ is the input,
+ * * \f$i_t\f$ is the input gate,
+ * * \f$f_t\f$ is the forget gate,
+ * * \f$C_t\f$ is the cell state,
+ * * \f$o_t\f$ is the output,
+ * * \f$h_t\f$ is the output state,
+ * * \f$\sigma\f$ is the logistic sigmoid function,
+ * * \f$g\f$ is the cell input and cell output activation function, usually
+ * \f$tanh\f$,
+ * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+ * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
+ * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+ * * \f$b_i\f$ is the input gate bias,
+ * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+ * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+ * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+ * * \f$b_f\f$ is the forget gate bias,
+ * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+ * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+ * * \f$b_c\f$ is the cell bias,
+ * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+ * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+ * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+ * * \f$b_o\f$ is the output gate bias,
+ * * \f$W_{proj}\f$ is the projection weight matrix,
+ * * \f$b_{proj}\f$ is the projection bias,
+ * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+ * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+ * * \f$\odot\f$ is the
+ * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+ * Hadamard product</a> that takes two matrices and produces another
+ * matrix, each element of which is the product of the corresponding
+ * elements of the input matrices.
+ *
+ * Since HAL version 1.2 LSTM supports layer normalization.
+ * In case layer normalization is used, the inputs to internal activation
+ * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
+ * following an approach from section 3.1 from
+ * https://arxiv.org/pdf/1607.06450.pdf
+ *
+ * The operation has the following independently optional inputs:
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+ * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+ * have values or neither of them have values (i.e., all set to null). If
+ * they have values, the peephole optimization is used.
+ * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
+ * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values. If they have no values, coupling of input
+ * and forget gates (CIFG) is used, in which case the input gate
+ * (\f$i_t\f$) is calculated using the following equation instead.
+ * \f{eqnarray*}{
+ * i_t = 1 - f_t
+ * \f}
+ * In case peephole optimization is used and CIFG is not used,
+ * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+ * cell-to-input weights must have no value.
+ * * The projection weights (\f$W_{proj}\f$) are required only for the
+ * recurrent projection layer, and should otherwise have no value.
+ * * The projection bias (\f$b_{proj}\f$) may (but need not) have a
+ * value if the recurrent projection layer exists, and should otherwise
+ * have no value.
+ * * (HAL version 1.2 or later) The four layer normalization weights either all have
+ * values or none of them have values. Additionally, if CIFG is used,
+ * input layer normalization weights tensor is omitted and the other layer
+ * normalization weights either all have values or none of them have
+ * values. Layer normalization is used when the values of all the layer
+ * normalization weights are present.
+ *
+ * References:
+ *
+ * The default non-peephole non-CIFG implementation is based on:
+ * http://www.bioinf.jku.at/publications/older/2604.pdf
+ * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
+ * Computation, 9(8):1735-1780, 1997.
+ *
+ * The peephole implementation and projection layer is based on:
+ * https://research.google.com/pubs/archive/43905.pdf
+ * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
+ * recurrent neural network architectures for large scale acoustic
+ * modeling." INTERSPEECH, 2014.
+ * (However, the concept of peephole optimization was introduced in work
+ * prior to this paper.)
+ *
+ * The coupling of input and forget gate (CIFG) is based on:
+ * http://arxiv.org/pdf/1503.04069.pdf
+ * Greff et al. "LSTM: A Search Space Odyssey"
+ *
+ * The layer normalization is based on:
+ * https://arxiv.org/pdf/1607.06450.pdf
+ * Jimmy Ba et al. "Layer Normalization"
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+ * tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+ * tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * Since HAL version 1.2 there are additional inputs to this op:
+ * * 23:The input layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 24:The forget layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 25:The cell layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 26:The output layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The scratch buffer.
+ * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
+ * [batch_size, num_units * 4] without CIFG.
+ * * 1: The output state (out) (\f$h_t\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 2: The cell state (out) (\f$C_t\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 3: The output (\f$o_t\f$).
+ * A 2-D tensor of shape [batch_size, output_size]. This is effectively
+ * the same as the current “output state (out)” value.
+ */
+ LSTM = @1.2::OperationType:LSTM,
+
+ /**
+ * Performs a 2-D max pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * max_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * )
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ MAX_POOL_2D = @1.2::OperationType:MAX_POOL_2D,
+
+ /**
+ * Multiplies two tensors, element-wise.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the product of both input tensors, optionally
+ * modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the resulting output is the maximum size along each dimension
+ * of the input operands. It starts with the trailing dimensions, and works
+ * its way forward.
+ *
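+ * Example:
+ *     input1.dimension = {4, 1, 2}
+ *     input2.dimension = {5, 4, 3, 1}
+ *     output.dimension = {5, 4, 3, 2}
+ *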
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The product, a tensor of the same {@link OperandType} as input0.
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input1_scale * input2_scale.
+ */
+ MUL = @1.2::OperationType:MUL,
+
+ /**
+ * Computes rectified linear activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = max(0, input)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU = @1.2::OperationType:RELU,
+
+ /**
+ * Computes rectified linear 1 activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = min(1.f, max(-1.f, input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of the same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU1 = @1.2::OperationType:RELU1,
+
+ /**
+ * Computes rectified linear 6 activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = min(6, max(0, input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU6 = @1.2::OperationType:RELU6,
+
+ /**
+ * Reshapes a tensor.
+ *
+ * Given a tensor, this operation returns a tensor that has the same values
+ * as the input tensor, but with a newly specified shape.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the tensor to be reshaped.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
+ * shape of the output tensor. The number of elements implied by shape
+ * must be the same as the number of elements in the input tensor.
+ *
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component
+ * of shape can be -1.
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape specified by the input shape.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
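+ *
+ * For example, reshaping a [2, 3] tensor with shape = [6] or [-1]
+ * produces a [6] tensor, and shape = [3, -1] produces a [3, 2] tensor.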
+ */
+ RESHAPE = @1.2::OperationType:RESHAPE,
+
+ /**
+ * Resizes images to a given size using bilinear interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio is not the
+ * same as the input aspect ratio. The corner pixels of the output may not
+ * be the same as the corner pixels of the input.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (resizing by scale, since HAL version 1.2):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
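+ *
+ * For example, resizing a [1, 2, 2, 1] input by shape to output width 4
+ * and output height 4, or by scale with width_scale = height_scale = 2.0,
+ * produces a [1, 4, 4, 1] output.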
+ */
+ RESIZE_BILINEAR = @1.2::OperationType:RESIZE_BILINEAR,
+
+ /**
+ * A basic recurrent neural network layer.
+ *
+ * This layer implements the operation:
+ * outputs = state = activation(inputs * input_weights +
+ * state * recurrent_weights + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: weights.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [num_units, num_units], with columns
+ * corresponding to the weights from each unit.
+ * * 3: bias.
+ * A 1-D tensor of shape [num_units].
+ * * 4: hidden state (in).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 5: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
+ *
+ * Outputs:
+ * * 0: hidden state (out).
+ * A 2-D tensor of shape [batch_size, num_units].
+ *
+ * * 1: output.
+ * A 2-D tensor of shape [batch_size, num_units]. This is effectively
+ * the same as the current state value.
+ */
+ RNN = @1.2::OperationType:RNN,
+
+ /**
+ * Computes the softmax activation on the input tensor element-wise, per
+ * batch, by normalizing the input vector so the maximum coefficient is
+ * zero.
+ *
+ * The output is calculated using this formula:
+ *
+ * output[batch, i] =
+ * exp((input[batch, i] - max(input[batch, :])) * beta) /
+ * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
+ *
+ * For input tensors with rank other than 2, the activation will be applied
+ * independently on each 1-D slice along the specified dimension.
+ *
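+ * For example, with beta = 1 an input row [1, 2, 3] produces
+ * [exp(-2), exp(-1), exp(0)] / (exp(-2) + exp(-1) + exp(0)), which is
+ * approximately [0.090, 0.245, 0.665].
+ *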
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 2-D or 4-D tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
+ * {@link OperandType::FLOAT32}.
+ * If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
+ * scalar must be of {@link OperandType::FLOAT16}.
+ * * 2: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension the activation would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ */
+ SOFTMAX = @1.2::OperationType:SOFTMAX,
+
+ /**
+ * Rearranges blocks of spatial data, into depth.
+ *
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the height and width dimensions are moved to the depth
+ * dimension. The value block_size indicates the input block size and how
+ * the data is moved.
+ *
+ * Non-overlapping blocks of size block_size x block_size in the height and
+ * width dimensions are rearranged into the depth dimension at each output
+ * location.
+ *
+ * The depth of the output tensor is input_depth * block_size * block_size.
+ * The input tensor's height and width must be divisible by block_size.
+ *
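+ * For example, a [1, 2, 2, 1] input with block_size = 2 is rearranged
+ * into a [1, 1, 1, 4] output.
+ *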
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size must be a divisor of both the
+ * input height and width.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, height/block_size,
+ * width/block_size, depth_in*block_size*block_size].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SPACE_TO_DEPTH = @1.2::OperationType:SPACE_TO_DEPTH,
+
+ /**
+ * SVDF op is a kind of stateful layer derived from the notion that a
+ * densely connected layer that's processing a sequence of input frames can
+ * be approximated by using a singular value decomposition of each of its
+ * nodes. The implementation is based on:
+ *
+ * https://research.google.com/pubs/archive/43813.pdf
+ *
+ * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
+ * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
+ * INTERSPEECH, 2015.
+ *
+ * It processes the incoming input using a 2-stage filtering mechanism:
+ * * stage 1 performs filtering on the "features" dimension, whose outputs
+ * get pushed into a memory of fixed-size memory_size.
+ * * stage 2 performs filtering on the "time" dimension of the memory_size
+ * memoized outputs of stage 1.
+ *
+ * Specifically, for rank 1, this layer implements the operation:
+ *
+ * memory = push(conv1d(inputs, weights_feature, feature_dim,
+ * "PADDING_VALID"));
+ * outputs = activation(memory * weights_time + bias);
+ *
+ * Where:
+ * * “weights_feature” is a weights matrix that processes the inputs (by
+ * convolving the input with every “feature filter”), and whose outputs
+ * get pushed, stacked in order, into the fixed-size “memory” (the oldest
+ * entry gets dropped);
+ * * “weights_time” is a weights matrix that processes the “memory” (by a
+ * batched matrix multiplication on the num_units);
+ * * “bias” is an optional bias vector (added to each output vector in the
+ * batch); and
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Each rank adds a dimension to the weights matrices by means of stacking
+ * the filters.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * All input tensors must be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: weights_feature.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
+ * * 2: weights_time.
+ * A 2-D tensor of shape [num_units, memory_size], where “memory_size”
+ * corresponds to the fixed-size of the memory.
+ * * 3: bias.
+ * An optional 1-D tensor of shape [num_units].
+ * * 4: state (in).
+ * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
+ * * 5: rank.
+ * The rank of the SVD approximation.
+ * * 6: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
+ *
+ * Outputs:
+ * * 0: state (out).
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+ * [batch_size, (memory_size - 1) * num_units * rank].
+ * * 1: output.
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+ * [batch_size, num_units].
+ */
+ SVDF = @1.2::OperationType:SVDF,
+
+ /**
+ * Computes hyperbolic tangent of input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = tanh(input)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ */
+ TANH = @1.2::OperationType:TANH,
+
+ /**
+ * BatchToSpace for N-dimensional tensors.
+ *
+ * This operation reshapes the batch dimension (dimension 0) into M + 1
+ * dimensions of shape block_shape + [batch], interleaves these blocks back
+ * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
+ * result with the same rank as the input.
+ *
+ * This is the reverse of SpaceToBatch.
+ *
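+ * For example, a [4, 1, 1, 1] input with block sizes [2, 2] is rearranged
+ * into a [1, 2, 2, 1] output.
+ *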
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be reshaped
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ BATCH_TO_SPACE_ND = @1.2::OperationType:BATCH_TO_SPACE_ND,
+
+ /**
+ * Element-wise division of two tensors.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the result of dividing the first input tensor
+ * by the second, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the first input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ DIV = @1.2::OperationType:DIV,
+
+ /**
+ * Computes the mean of elements across dimensions of a tensor.
+ *
+ * Reduces the input tensor along the given dimensions to reduce. Unless
+ * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
+ * in axis. If keep_dims is true, the reduced dimensions are retained with
+ * length 1.
+ *
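+ * For example, a [2, 2] input [[1, 2], [3, 4]] reduced along axis 1 with
+ * keep_dims = 0 produces the [2] output [1.5, 3.5]; with a positive
+ * keep_dims the output shape is [2, 1].
+ *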
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Must be in the range
+ * [-rank(input_tensor), rank(input_tensor)).
+ *
+ * NOTE: When the operation was introduced, the documentation
+ * incorrectly stated that if dimensions were empty, the operation
+ * would reduce across all dimensions. This behavior was never
+ * implemented.
+ *
+ * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be same as input0.
+ */
+ MEAN = @1.2::OperationType:MEAN,
+
+ /**
+ * Pads a tensor.
+ *
+ * This operation pads a tensor according to the specified paddings.
+ *
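+ * For example, a [2, 3] input padded with paddings = [[1, 1], [0, 2]]
+ * produces a [4, 5] output: 1 + 2 + 1 = 4 rows and 0 + 3 + 2 = 5 columns.
+ *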
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
+ */
+ PAD = @1.2::OperationType:PAD,
+
+ /**
+ * SpaceToBatch for N-Dimensional tensors.
+ *
+ * This operation divides "spatial" dimensions [1, ..., M] of the input into
+ * a grid of blocks of shape block_shape, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial dimensions
+ * [1, ..., M] correspond to the position within the grid, and the batch
+ * dimension combines both the position within a spatial block and the
+ * original batch position. Prior to division into blocks, the spatial
+ * dimensions of the input are optionally zero padded according to paddings.
+ *
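+ * For example, a [1, 4, 4, 1] input with block sizes [2, 2] and zero
+ * paddings produces a [4, 2, 2, 1] output.
+ *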
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. All values must be
+ * >= 0. The shape of the tensor must be {M, 2}, where M is the number
+ * of spatial dimensions.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
+ */
+ SPACE_TO_BATCH_ND = @1.2::OperationType:SPACE_TO_BATCH_ND,
+
+ /**
+ * Removes dimensions of size 1 from the shape of a tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of the same
+ * {@link OperandType} with all dimensions of size 1 removed. If you don't
+ * want to remove all size 1 dimensions, you can remove specific size 1
+ * dimensions by specifying the axes (input1).
+ *
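+ * For example, squeezing a tensor of shape [1, 3, 1, 2] with no axes
+ * specified produces shape [3, 2], while squeezing only axis 0 produces
+ * shape [3, 1, 2].
+ *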
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, the tensor to be squeezed.
+ * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * dimensions to squeeze. If specified only squeezes the dimensions
+ * listed. Otherwise, squeezes all dimensions. The dimension index
+ * starts at 0. An error must be reported if squeezing a dimension that
+ * is not 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. Contains the
+ * same data as input, but has one or more dimensions of size 1
+ * removed.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SQUEEZE = @1.2::OperationType:SQUEEZE,
+
+ /**
+ * Extracts a strided slice of a tensor.
+ *
+ * Roughly speaking, this op extracts a slice of size (end - begin) / stride
+ * from the given input tensor. Starting at the location specified by begin
+ * the slice continues by adding stride to the index until all dimensions
+ * are not less than end. Note that a stride can be negative, which causes a
+ * reverse slice.
+ *
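+ * For example, slicing the 1-D input [1, 2, 3, 4, 5, 6] with begin = [1],
+ * end = [5] and strides = [2] produces [2, 4]. With begin = [1],
+ * end = [2], strides = [1] and shrink_axis_mask = 1 the output is the
+ * 0-D scalar 2.
+ *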
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be sliced.
+ * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * starts of the dimensions of the input tensor to be sliced. The
+ * length must be of rank(input0).
+ * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * ends of the dimensions of the input tensor to be sliced. The length
+ * must be of rank(input0).
+ * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * strides of the dimensions of the input tensor to be sliced. The
+ * length must be of rank(input0). The entries must be non-zero.
+ * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
+ * of begin_mask is set, begin[i] is ignored and the fullest possible
+ * range in that dimension is used instead.
+ * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
+ * end_mask is set, end[i] is ignored and the fullest possible range in
+ * that dimension is used instead.
+ * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
+ * ith bit of shrink_axis_mask is set, the ith dimension specification
+ * shrinks the dimensionality by 1, taking on the value at index
+ * begin[i]. In this case, the ith specification must define a
+ * slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
+ * where k is the number of bits set in shrink_axis_mask.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ STRIDED_SLICE = @1.2::OperationType:STRIDED_SLICE,
+
+ /**
+ * Element-wise subtraction of two tensors.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the result of subtracting the second input
+ * tensor from the first one, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the first input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ SUB = @1.2::OperationType:SUB,
+
+ /**
+ * Transposes the input tensor, permuting the dimensions according to the
+ * perm tensor.
+ *
+ * The returned tensor's dimension i corresponds to the input dimension
+ * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
+ * rank of the input tensor. Hence by default, this operation performs a
+ * regular matrix transpose on 2-D input Tensors.
+ *
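+ * For example, a [2, 3] input with perm omitted (or perm = [1, 0])
+ * produces a [3, 2] output, and a [2, 3, 4] input with perm = [0, 2, 1]
+ * produces a [2, 4, 3] output.
+ *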
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be transposed.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
+ * the permutation of the dimensions of the input tensor.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ TRANSPOSE = @1.2::OperationType:TRANSPOSE,
+
+ /**
+ * Computes the absolute value of a tensor, element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ ABS = @1.2::OperationType:ABS,
+
+ /**
+ * Returns the index of the largest element along an axis.
+ *
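+ * For example, the [2, 2] input [[1, 4], [3, 2]] with axis = 1 produces
+ * the output [1, 0].
+ *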
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ */
+ // There is no underscore in ARG_MAX to avoid name conflict with
+ // the macro defined in libc/kernel/uapi/linux/limits.h.
+ ARGMAX = @1.2::OperationType:ARGMAX,
+
+ /**
+ * Returns the index of the smallest element along an axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ */
+ ARGMIN = @1.2::OperationType:ARGMIN, // See ARGMAX for naming discussion.
+
+ /**
+ * Transform axis-aligned bounding box proposals using bounding box deltas.
+ *
+ * Given the positions of bounding box proposals and the corresponding
+ * bounding box deltas for each class, return the refined bounding box
+ * regions. The resulting bounding boxes are clipped against the edges of
+ * the image.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT16_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
+ * bounding box proposals, each line with format [x1, y1, x2, y2].
+ * For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+ * is supported for this tensor.
+ * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
+ * bounding box delta for each region of interest and each class. The
+ * bounding box deltas are organized in the following order
+ * [dx, dy, dw, dh], where dx and dy are the relative correction factors
+ * for the center position of the bounding box with respect to the width
+ * and height, and dw and dh are the log-scale relative correction factors
+ * for the width and height. For input0 of type
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * supported for this tensor.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
+ * each image in the batch, each line with format
+ * [image_height, image_width].
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, with shape
+ * [num_rois, num_classes * 4], specifying the coordinates of each
+ * output bounding box for each class, with format [x1, y1, x2, y2].
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ */
+ AXIS_ALIGNED_BBOX_TRANSFORM = @1.2::OperationType:AXIS_ALIGNED_BBOX_TRANSFORM,
+
+ /**
+ * Performs a forward LSTM on the input followed by a backward LSTM.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where "max_time" is the number of timesteps (sequence length),
+ * "batch_size" corresponds to the batching dimension, and
+ * "input_size" is the size of the input.
+ * * 1: The forward input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+ * corresponds to the number of forward cell units.
+ * * 2: The forward input-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 3: The forward input-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 4: The forward input-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 5: The forward recurrent-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+ * corresponds to either the number of cell units (i.e., fw_num_units),
+ * or the second dimension of the “fw_projection_weights”, if defined.
+ * * 6: The forward recurrent-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 7: The forward recurrent-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 8: The forward recurrent-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 9: The forward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 10: The forward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 11: The forward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 12: The forward input gate bias. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 13: The forward forget gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 14: The forward cell gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 15: The forward output gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 16: The forward projection weights. Optional.
+ * A 2-D tensor of shape [fw_output_size, fw_num_units].
+ * * 17: The forward projection bias. Optional.
+ * A 1-D tensor of shape [fw_output_size].
+ * * 18: The backward input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+ * corresponds to the number of backward cell units.
+ * * 19: The backward input-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 20: The backward input-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 21: The backward input-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 22: The backward recurrent-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+ * corresponds to either the number of cell units (i.e., “bw_num_units”),
+ * or the second dimension of the “bw_projection_weights”, if defined.
+ * * 23: The backward recurrent-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 24: The backward recurrent-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 25: The backward recurrent-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 26: The backward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 27: The backward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 28: The backward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 29: The backward input gate bias. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 30: The backward forget gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 31: The backward cell gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 32: The backward output gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 33: The backward projection weights. Optional.
+ * A 2-D tensor of shape [bw_output_size, bw_num_units].
+ * * 34: The backward projection bias. Optional.
+ * A 1-D tensor of shape [bw_output_size].
+ * * 35: The forward input activation state.
+ * A 2-D tensor of shape [batch_size, fw_output_size].
+ * * 36: The forward input cell state.
+ * A 2-D tensor of shape [batch_size, fw_num_units].
+ * * 37: The backward input activation state.
+ * A 2-D tensor of shape [batch_size, bw_output_size].
+ * * 38: The backward input cell state.
+ * A 2-D tensor of shape [batch_size, bw_num_units].
+ * * 39: The auxiliary input. Optional.
+ * A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 40: The forward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 41: The forward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 42: The forward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 43: The forward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 44: The backward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 45: The backward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 46: The backward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 47: The backward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 48: The activation function.
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 49: The clipping threshold for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+ * this scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
+ * this scalar must be of type {@link OperandType::FLOAT16}.
+ * * 50: The clipping threshold for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+ * this scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
+ * this scalar must be of type {@link OperandType::FLOAT16}.
+ * * 51: merge_outputs
+ * An {@link OperandType::BOOL} scalar specifying if the outputs
+ * from forward and backward cells should be merged.
+ * * 52: time_major
+ * An {@link OperandType::BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 53: The forward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 54: The forward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 55: The forward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 56: The forward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ * * 57: The backward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 58: The backward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 59: The backward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 60: The backward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The forward output.
+ * A 3-D tensor of shape:
+ * If time-major and not merge_outputs:
+ * [max_time, batch_size, fw_output_size]
+ * If time-major and merge_outputs:
+ * [max_time, batch_size, fw_output_size + bw_output_size]
+ * If batch-major and not merge_outputs:
+ * [batch_size, max_time, fw_output_size]
+ * If batch-major and merge_outputs:
+ * [batch_size, max_time, fw_output_size + bw_output_size]
+ * * 1: The backward output. Unused if merge_outputs is true.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, bw_output_size]
+ * If batch-major: [batch_size, max_time, bw_output_size]
+ */
+ BIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_LSTM,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs in forward and backward directions.
+ *
+ * This Op unrolls the input along the sequence dimension, and implements
+ * the following operation for each element in the sequence s =
+ * 1...sequence_length:
+ * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
+ * fw_state * fw_recurrent_weights’ + fw_bias)
+ *
+ * And for each element in sequence t = sequence_length : 1
+ * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
+ * bw_state * bw_recurrent_weights’ + bw_bias)
+ *
+ * Where:
+ * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
+ * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
+ * current “state” which itself is the output from the previous time step
+ * computation;
+ * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
+ * batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * The op also supports an auxiliary input. Regular cell feeds one input
+ * into the two RNN cells in the following way:
+ *
+ * INPUT (INPUT_REVERSED)
+ * | |
+ * ---------------------
+ * | FW_RNN BW_RNN |
+ * ---------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * An op with an auxiliary input takes two inputs and feeds them into the
+ * RNN cells in the following way:
+ *
+ * AUX_INPUT (AUX_INPUT_REVERSED)
+ * | |
+ * INPUT | (INPUT_R'D.)|
+ * | | | |
+ * -----------------------
+ * | \ / \ / |
+ * | FW_RNN BW_RNN |
+ * -----------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * When stacking this op on top of itself, this allows both forward and
+ * backward outputs from the previous cell to be connected to the next
+ * cell's inputs.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to true, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: fwWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 2: fwRecurrentWeights.
+ * A 2-D tensor of shape [fwNumUnits, fwNumUnits].
+ * * 3: fwBias.
+ * A 1-D tensor of shape [fwNumUnits].
+ * * 4: fwHiddenState.
+ * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: bwWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 6: bwRecurrentWeights.
+ * A 2-D tensor of shape [bwNumUnits, bwNumUnits].
+ * * 7: bwBias.
+ * A 1-D tensor of shape [bwNumUnits].
+ * * 8: bwHiddenState
+ * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 9: auxInput.
+ * A 3-D tensor. The shape is the same as of the input 0.
+ * * 10:fwAuxWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 11:bwAuxWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 12:fusedActivationFunction.
+ * A {@link FusedActivationFunc} value indicating the activation function. If
+ * “NONE” is specified then it results in a linear activation.
+ * * 13:timeMajor
+ * An {@link OperandType::BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 14:mergeOutputs
+ * An {@link OperandType::BOOL} scalar specifying if the outputs
+ * from forward and backward cells are separate (if set to false) or
+ * concatenated (if set to true).
+ * Outputs:
+ * * 0: fwOutput.
+ * A 3-D tensor. The first two dimensions of the shape are defined by
+ * the input 6 (timeMajor) and the third dimension is defined by the
+ * input 14 (mergeOutputs). If timeMajor is set to true, then the first
+ * two dimensions are [maxTime, batchSize], otherwise they are set to
+ * [batchSize, maxTime]. If mergeOutputs is set to true, then the third
+ * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
+ * to fwNumUnits.
+ * * 1: bwOutput.
+ * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
+ * this tensor is not produced. The shape is defined by the input 6
+ * (timeMajor). If it is set to true, then the shape is set to
+ * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
+ * [batchSize, maxTime, bwNumUnits].
+ */
+ BIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_RNN,
+
+ /**
+ * Greedily selects a subset of bounding boxes in descending order of score.
+ *
+ * This op applies the NMS algorithm to each class. In each loop of
+ * execution, the box with the maximum score gets selected and removed from
+ * the pending set. The scores of the rest of the boxes are lowered
+ * according to the intersection-over-union (IOU) overlapping with the
+ * previously selected boxes and a specified NMS kernel method. Any boxes
+ * with a score less than the threshold are removed from the pending set.
+ *
+ * Three NMS kernels are supported:
+ * * Hard: score_new = score_old * (1 if IoU < threshold else 0)
+ * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+ * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
+ *
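+ * For example, with an IoU threshold of 0.5, a pending box that overlaps
+ * a selected box with IoU = 0.6 has its score multiplied by 0 under the
+ * hard kernel and by 1 - 0.6 = 0.4 under the linear kernel.
+ *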
+ * Axis-aligned bounding boxes are represented by their upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
+ * of each bounding box proposal. The boxes are grouped by batches in the
+ * first dimension. Zero num_rois is supported for this tensor.
+ * * 1: A 2-D Tensor specifying the bounding boxes of shape
+ * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
+ * The boxes are grouped by batches in the first dimension. The sequential
+ * order of the boxes corresponds with input0. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
+ * scale of 0.125. Zero num_rois is supported for this tensor.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
+ * with scores lower than the threshold are filtered before sending
+ * to the NMS algorithm.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of selected bounding boxes for each image. Set to a negative
+ * value for unlimited number of output bounding boxes.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
+ * kernel method, options are 0:hard, 1:linear, 2:gaussian.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold in hard and linear NMS kernel. This field is ignored if
+ * gaussian kernel is selected.
+ * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
+ * gaussian NMS kernel. This field is ignored if gaussian kernel is
+ * not selected.
+ * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
+ * Boxes with scores lower than the threshold are dropped during the
+ * score updating phase in soft NMS.
+ *
+ * Outputs:
+ * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
+ * [num_output_rois], specifying the score of each output box. The boxes
+ * are grouped by batches, but the sequential order in each batch is not
+ * guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale and zero point must be the same as input0.
+ * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
+ * [num_output_rois, 4], specifying the coordinates of each
+ * output bounding box with the same format as input1. The sequential
+ * order of the boxes corresponds with output0. For type of
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
+ * 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the class of each output box. The
+ * sequential order of the boxes corresponds with output0.
+ * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ */
+ BOX_WITH_NMS_LIMIT = @1.2::OperationType:BOX_WITH_NMS_LIMIT,
+
+ /**
+ * Casts a tensor to a new type.
+ *
+      * This operation ignores the scale and zeroPoint of quantized tensors,
+ * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
+ * as a tensor of uint8 values.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ */
+ CAST = @1.2::OperationType:CAST,
+
+ /**
+      * Shuffles the channels of the input tensor.
+      *
+      * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
+      * divides the channel dimension into num_groups groups and reorganizes the
+      * channels by grouping channels with the same index in each group.
+ *
+ * Along the channel dimension, the output is calculated using this formula:
+ *
+ * output_channel[k * num_groups + g] = input_channel[g * group_size + k]
+ *
+ * where group_size = num_channels / num_groups
+ *
+ * The number of channels must be divisible by num_groups.
+ *
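+      * For example (illustrative, not normative): with num_channels = 6 and
+      * num_groups = 2, the output channel order is [0, 3, 1, 4, 2, 5].
+      *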
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be shuffled.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the dimension
+ * channel shuffle would be performed on. Negative index is used to
+ * specify axis from the end (e.g. -1 for the last axis). Must be in
+ * the range [-n, n).
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ CHANNEL_SHUFFLE = @1.2::OperationType:CHANNEL_SHUFFLE,
+
+ /**
+      * Applies postprocessing steps to bounding box detections.
+ *
+ * Bounding box detections are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
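+      * For example (illustrative, not normative): a delta of
+      * [dy, dx, dh, dw] = [0, 0, 0, 0] leaves its anchor unchanged, since the
+      * zero linear corrections keep the center and exp(0) = 1 keeps the width
+      * and height.
+      *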
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
+ * the score of each anchor with each class. Class 0 for each
+ * [batches, num_anchors, 0] is background and will be ignored.
+ * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
+ * the first four values in length_box_encoding specifying the bounding
+ * box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
+      *      where dy and dx are the linear-scale relative correction factors for
+      *      the center position of the bounding box with respect to the width and
+      *      height, and dh and dw are the log-scale relative correction factors
+      *      for the width and height. All the entries in length_box_encoding beyond the first four
+ * values are ignored in this operation.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
+ * ctr_x are the center position of the box, and h and w are the height
+ * and the width.
+ * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dy in bounding box deltas.
+ * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dx in bounding box deltas.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dh in bounding box deltas.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dw in bounding box deltas.
+      * * 7: An {@link OperandType::BOOL} scalar, set to true to use the regular
+      *      multi-class NMS algorithm that does NMS separately for each class,
+      *      set to false for a faster algorithm that only does a single NMS
+      *      using the highest class score.
+ * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying
+ * the maximum number of boxes for the output. Boxes with the lowest
+ * scores are discarded to meet the limit.
+ * * 9: An {@link OperandType::INT32} scalar, only used when input7 is
+ * set to false, specifying the maximum number of classes per detection.
+ * * 10: An {@link OperandType::INT32} scalar, only used when input7 is
+ * set to true, specifying the maximum number of detections when
+ * applying NMS algorithm for each single class.
+ * * 11: A scalar, score_threshold. Boxes with scores lower than the
+ * threshold are filtered before sending to the NMS algorithm. The
+ * scalar must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
+ * must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+      * * 13: An {@link OperandType::BOOL} scalar, set to true to include the
+      *      background class in the label map for the output, set to false
+      *      to not include the background. When the background class is
+      *      included, it has label 0 and the output classes start at 1 in
+      *      the label map; otherwise, the output classes start at 0.
+ *
+ * Outputs:
+ * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape
+ * [batches, max_num_detections], specifying the score of each output
+      *      detection.
+ * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
+ * coordinates of each output bounding box, with format
+ * [y1, x1, y2, x2].
+ * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [batches, max_num_detections], specifying the class label for each
+ * output detection.
+      * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
+ * specifying the number of valid output detections for each batch.
+ */
+ DETECTION_POSTPROCESSING = @1.2::OperationType:DETECTION_POSTPROCESSING,
+
+ /**
+ * For input tensors x and y, computes x == y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
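+      * For example (illustrative, not normative): comparing a [2, 3] tensor
+      * with a [1, 3] tensor broadcasts the second input across rows and yields
+      * a [2, 3] output of {@link OperandType::TENSOR_BOOL8}.
+      *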
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ EQUAL = @1.2::OperationType:EQUAL,
+
+ /**
+ * Computes exponential of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+      * * 0: The output tensor of the same shape as input0.
+ */
+ EXP = @1.2::OperationType:EXP,
+
+ /**
+ * Inserts a dimension of 1 into a tensor's shape.
+ *
+ * Given a tensor input, this operation inserts a dimension of 1 at the
+ * given dimension index of input's shape. The dimension index starts at
+ * zero; if you specify a negative dimension index, it is counted backward
+ * from the end.
+ *
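+      * For example (illustrative, not normative): expanding a tensor of shape
+      * [2, 3] at dimension index 1 (or equivalently -2) produces a tensor of
+      * shape [2, 1, 3].
+      *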
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: An {@link OperandType::INT32} scalar specifying the dimension
+ * index to expand. Must be in the range [-(n + 1), (n + 1)).
+ *
+ * Outputs:
+ * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
+ * input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ EXPAND_DIMS = @1.2::OperationType:EXPAND_DIMS,
+
+ /**
+ * Gathers values along an axis.
+ *
+ * Produces an output tensor with shape
+ * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
+ * where:
+ * # Vector indices (output is rank(input0)).
+ * output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+ *
+ * # Higher rank indices (output is rank(input0) + rank(indices) - 1).
+ * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+ * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
+ *
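+      * For example (illustrative, not normative): gathering from an input of
+      * shape [3, 4] with axis = 0 and indices = [2, 0] yields an output of
+      * shape [2, 4] holding rows 2 and 0 of the input.
+      *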
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor from which to gather values.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis.
+ * Negative index is used to specify axis from the end
+ * (e.g. -1 for the last axis). Must be in the range [-n, n).
+ * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
+ * The values must be in the bounds of the corresponding dimensions
+ * of input0.
+ *
+ * Outputs:
+ * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ GATHER = @1.2::OperationType:GATHER,
+
+ /**
+      * Generates axis-aligned bounding box proposals.
+ *
+ * Bounding box proposals are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+      * Axis-aligned bounding boxes are represented by their upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor specifying the score of each anchor at each
+ * location. With "NHWC" data layout, the tensor shape is
+ * [batches, height, width, num_anchors]. With "NCHW" data layout,
+ * the tensor shape is [batches, num_anchors, height, width].
+ * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
+ * layout, the tensor shape is [batches, height, width, num_anchors * 4].
+ * With "NCHW" data layout, the tensor shape is
+ * [batches, num_anchors * 4, height, width]. The box deltas are encoded
+      *      in the order of [dx, dy, dw, dh], where dx and dy are the linear-scale
+      *      relative correction factors for the center position of the bounding box
+      *      with respect to the width and height, and dw and dh are the log-scale
+      *      relative correction factors for the width and height. The last
+      *      dimension is the channel dimension.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
+ * each image in the batch, with format [image_height, image_width].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, this
+ * tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
+ * scale of 0.125.
+ * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of boxes before going into the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+      *      a non-positive value for an unlimited number.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of boxes returning from the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+      *      a non-positive value for an unlimited number.
+ * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold for hard NMS.
+ * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with
+ * height or width lower than the absolute threshold are filtered out.
+ * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and input1. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, of shape
+ * [num_output_rois], specifying the score of each output box.
+ * The boxes are grouped by batches, but the sequential order in
+ * each batch is not guaranteed. For type of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scale and zero
+ * point must be the same as input0.
+ * * 1: A tensor of the same {@link OperandType} as input3, of shape
+ * [num_output_rois, 4], specifying the coordinates of each output
+ * bounding box for each class, with format [x1, y1, x2, y2].
+ * The sequential order of the boxes corresponds with output0.
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ */
+ GENERATE_PROPOSALS = @1.2::OperationType:GENERATE_PROPOSALS,
+
+ /**
+ * For input tensors x and y, computes x > y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ GREATER = @1.2::OperationType:GREATER,
+
+     /**
+ * For input tensors x and y, computes x >= y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ GREATER_EQUAL = @1.2::OperationType:GREATER_EQUAL,
+
+ /**
+ * Performs a grouped 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
+ * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
+ * applies a group of different filters to each input channel group, then
+ * concatenates the results together.
+ *
+ * Specifically, the input channels are divided into num_groups groups, each with
+ * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
+ * filters are also divided into num_groups groups, i.e. depth_out is divisible
+ * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
+      * input channel group, and the results are concatenated together.
+ *
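+      * For example (illustrative, not normative): with depth_in = 8 and
+      * num_groups = 2, each filter has depth_group = 4; a filter tensor with
+      * depth_out = 6 then assigns 3 filters to each input channel group.
+      *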
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, g * channel_multiplier + q] =
+ * sum_{di, dj, dk} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj,
+ * g * depth_group + dk] *
+ * filter[g * channel_multiplier + q, di, dj, dk]
+ * ) + bias[channel]
+ *
+ * where channel_multiplier = depth_out / num_groups
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (channelDim at
+ * {@link SymmPerChannelQuantParams}) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, specifying the number of
+      *      groups.
+ * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ GROUPED_CONV_2D = @1.2::OperationType:GROUPED_CONV_2D,
+
+ /**
+      * Localizes the maximum keypoints from heatmaps.
+ *
+ * This operation approximates the accurate maximum keypoint scores and
+ * indices after bicubic upscaling by using Taylor expansion up to the
+ * quadratic term.
+ *
+ * The bounding box is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor of shape
+ * [num_boxes, heatmap_size, heatmap_size, num_keypoints],
+      *      specifying the heatmaps. The height and width of the heatmaps
+      *      must be the same and greater than or equal to 2.
+ * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
+ * each with format [x1, y1, x2, y2]. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
+ * be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
+ * of 0 and scale of 0.125.
+ * * 2: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, with shape
+      *      [num_boxes, num_keypoints], specifying the score of the keypoints.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 1: A tensor of the same {@link OperandType} as input1, with shape
+ * [num_boxes, num_keypoints, 2], specifying the location of
+ * the keypoints, the second dimension is organized as
+ * [keypoint_x, keypoint_y].
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ */
+ HEATMAP_MAX_KEYPOINT = @1.2::OperationType:HEATMAP_MAX_KEYPOINT,
+
+ /**
+ * Applies instance normalization to the input tensor.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, h, w, c] =
+ * (input[b, h, w, c] - mean[b, c]) * gamma /
+ * sqrt(var[b, c] + epsilon) + beta
+ *
+ * Where the mean and variance are computed across the spatial dimensions:
+ *
+ * mean[b, c] =
+ * sum_{h, w}(input[b, h, w, c]) / sum(1)
+ *
+ * var[b, c] =
+ * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
+ *
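+      * For example (illustrative, not normative): for a [1, 2, 2, 1] input
+      * holding {1, 3, 5, 7} with gamma = 1, beta = 0 and a negligible epsilon,
+      * mean = 4, var = 5, and each output value is (x - 4) / sqrt(5).
+      *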
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: A scalar, specifying gamma, the scale applied to the normalized
+ * tensor. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 2: A scalar, specifying beta, the offset applied to the normalized
+ * tensor. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 3: A scalar, specifying epsilon, the small value added to variance to
+ * avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 4: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ */
+ INSTANCE_NORMALIZATION = @1.2::OperationType:INSTANCE_NORMALIZATION,
+
+ /**
+ * For input tensors x and y, computes x < y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LESS = @1.2::OperationType:LESS,
+
+ /**
+ * For input tensors x and y, computes x <= y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LESS_EQUAL = @1.2::OperationType:LESS_EQUAL,
+
+ /**
+ * Computes natural logarithm of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+      * * 0: The output tensor of the same shape as input0.
+ */
+ LOG = @1.2::OperationType:LOG,
+
+ /**
+ * Returns the truth value of x AND y element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LOGICAL_AND = @1.2::OperationType:LOGICAL_AND,
+
+ /**
+ * Computes the truth value of NOT x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ *
+ * Outputs:
+      * * 0: The output tensor of the same shape as input0.
+ */
+ LOGICAL_NOT = @1.2::OperationType:LOGICAL_NOT,
+
+ /**
+ * Returns the truth value of x OR y element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LOGICAL_OR = @1.2::OperationType:LOGICAL_OR,
+
+ /**
+ * Computes the log softmax activations given logits.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
+ *
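+      * For example (illustrative, not normative): with beta = 1 and logits
+      * [0, 0], both output values are -log(2).
+      *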
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor specifying the input logits.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 2: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: The output tensor of the same {@link OperandType} and shape as
+ * input0.
+ */
+ LOG_SOFTMAX = @1.2::OperationType:LOG_SOFTMAX,
+
+ /**
+ * Returns the element-wise maximum of two tensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+ * with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ MAXIMUM = @1.2::OperationType:MAXIMUM,
+
+ /**
+ * Returns the element-wise minimum of two tensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+ * with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ MINIMUM = @1.2::OperationType:MINIMUM,
+
+ /**
+ * Computes numerical negative value element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+      * * 0: The output tensor of the same shape as input0.
+ */
+ NEG = @1.2::OperationType:NEG,
+
+ /**
+ * For input tensors x and y, computes x != y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ NOT_EQUAL = @1.2::OperationType:NOT_EQUAL,
+
+ /**
+ * Pads a tensor with the given constant value according to the specified
+ * paddings.
+ *
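+      * For example (illustrative, not normative): padding a [2, 3] tensor with
+      * paddings [[1, 1], [0, 2]] produces a [4, 5] output.
+      *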
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after
+ * the end of dimension i.
+      * * 2: A scalar specifying the value to use for padding input0.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * pad value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * pad value must be of {@link OperandType::FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the pad value must be of {@link OperandType::INT32}. The
+ * scale and zeroPoint are assumed to be the same as in input0.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ PAD_V2 = @1.2::OperationType:PAD_V2,
+
+ /**
+ * Computes the power of one value to another.
+ *
+ * Given a tensor base and a tensor exponent, this operation computes
+ * base^exponent elementwise.
+ *
+      * This operation supports broadcasting. The size of the output is the
+ * maximum size along each dimension of the input operands. It starts with
+ * the trailing dimensions, and works its way forward.
+ *
+ * For example:
+ * base.dimension = {4, 1, 2}
+ * exponent.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor specifying the base.
+ * * 1: A tensor specifying the exponent.
+ *
+ * Outputs:
+ * * 0: An output tensor.
+ */
+ POW = @1.2::OperationType:POW,
+
+ /**
+ * Parametric Rectified Linear Unit.
+ *
+ * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
+ * is a learned array with the same {@link OperandType} and compatible
+ * dimensions as input x.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input.dimension = {4, 1, 2}
+ * alpha.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0, specifying the alpha.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+      *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ */
+ PRELU = @1.2::OperationType:PRELU,
+
+ /**
+ * Quantizes the input tensor.
+ *
+ * The formula is:
+ *
+      * output = max(0, min(255, round(input / scale) + zeroPoint))
+ *
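+      * For example (illustrative, not normative): with scale = 0.5 and
+      * zeroPoint = 10, an input value of 2.3 quantizes to
+      * round(2.3 / 0.5) + 10 = 15.
+      *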
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, may be zero-sized.
+ *
+ * Outputs:
+      * * 0: The output tensor of the same shape as input0, but with
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ */
+ QUANTIZE = @1.2::OperationType:QUANTIZE,
+
+ /**
+ * A version of quantized LSTM, using 16 bit quantization for internal
+ * state.
+ *
+ * There is no projection layer, so cell state size is equal to the output
+ * size.
+ *
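+      * For example (illustrative, not normative): with the fixed input
+      * quantization (scale = 1/128, zeroPoint = 128), a real value of 0.5 is
+      * stored as round(0.5 * 128) + 128 = 192.
+      *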
+ * Inputs:
+ * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, inputSize] specifying the input to the LSTM
+ * cell. Tensor is quantized with a fixed quantization range of
+ * [-1, 127/128] (scale = 1/128, zeroPoint = 128).
+ * * 1: The input-to-input weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-input part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 2: The input-to-forget weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-forget part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 3: The input-to-cell weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-cell part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 4: The input-to-output weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-output part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 5: The recurrent-to-input weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-input part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 6: The recurrent-to-forget weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-forget
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 7: The recurrent-to-cell weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-cell part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 8: The recurrent-to-output weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-output
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 9: The input gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+      * * 10: The forget gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+      * * 11: The cell bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+      * * 12: The output gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] specifying the cell state from the
+ * previous time step of the LSTM cell. It is quantized using a
+ * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
+ * 32768, zeroPoint = 0).
+ * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+      *      and shape [numBatches, outputSize] specifying the output of the LSTM
+ * cell from previous time-step. Tensor is quantized with a fixed
+ * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
+ * 128).
+      *
+ * Outputs:
+ * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] which contains a cell state from
+ * the current time step. Tensor is quantized using a quantization
+ * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
+ * 0).
+ * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+      *      and shape [numBatches, outputSize] which contains the output value.
+ * Tensor is quantized with a fixed quantization range of [-1, 127/128]
+ * (scale = 1/128, zeroPoint = 128).
+ */
+ QUANTIZED_16BIT_LSTM = @1.2::OperationType:QUANTIZED_16BIT_LSTM,
+
+ /**
+ * Draws samples from a multinomial distribution.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 2-D tensor with shape [batches, classes], specifying the
+ * unnormalized log-probabilities for all classes.
+ * * 1: A scalar {@link OperandType::INT32}, specifying the number of
+ * independent samples to draw for each row slice.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
+ * specifying seeds used to initialize the random distribution.
+      *
+      * Outputs:
+ * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
+ * [batches, samples], containing the drawn samples.
+ */
+ RANDOM_MULTINOMIAL = @1.2::OperationType:RANDOM_MULTINOMIAL,
+
+ /**
+ * Reduces a tensor by computing the "logical and" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
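+      * For example (illustrative, not normative): reducing a [2, 3] tensor
+      * over dimension 1 yields shape [2] with keep_dims = false, or [2, 1]
+      * with keep_dims = true.
+      *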
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_ALL = @1.2::OperationType:REDUCE_ALL,
+
+ /**
+ * Reduces a tensor by computing the "logical or" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_ANY = @1.2::OperationType:REDUCE_ANY,
+
+ /**
+ * Reduces a tensor by computing the maximum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REDUCE_MAX = @1.2::OperationType:REDUCE_MAX,
+
+ /**
+ * Reduces a tensor by computing the minimum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REDUCE_MIN = @1.2::OperationType:REDUCE_MIN,
+
+ /**
+ * Reduces a tensor by multiplying elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_PROD = @1.2::OperationType:REDUCE_PROD,
+
+ /**
+ * Reduces a tensor by summing elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_SUM = @1.2::OperationType:REDUCE_SUM,
+
+ /**
+      * Selects and scales the feature map of each region of interest to a
+      * unified output size by average pooling sampling points obtained with
+      * bilinear interpolation.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+      * A spatial scaling factor is applied to map into feature map coordinates.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+      * No rounding is applied in this operation. The sampling points are
+      * uniformly distributed in the pooling bin and their values are
+      * calculated by bilinear interpolation.
+ *
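+      * For example (illustrative, not normative): with 2 sampling points in
+      * each of the height and width dimensions, every output element is the
+      * average of 4 bilinearly interpolated values from its pooling bin.
+      *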
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+ * supported for this tensor.
+      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the number of
+ * sampling points in height dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_height/out_height).
+ * * 8: An {@link OperandType::INT32} scalar, specifying the number of
+ * sampling points in width dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_width/out_width).
+ * * 9: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ */
+ ROI_ALIGN = @1.2::OperationType:ROI_ALIGN,
+
+ /**
+      * Selects and scales the feature map of each region of interest to a
+      * unified output size by max-pooling.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+      * A spatial scaling factor is applied to map into feature map coordinates.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Rounding is applied in this operation to ensure integer boundary for
+ * regions of interest and pooling bins.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125.
+      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ ROI_POOLING = @1.2::OperationType:ROI_POOLING,
+
+ /**
+ * Computes reciprocal of square root of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ RSQRT = @1.2::OperationType:RSQRT,
+
+ /**
+ * Uses a tensor of booleans c and input tensors x and y to select values
+ * elementwise from both input tensors:
+ *
+ * o[i] = c[i] ? x[i] : y[i].
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a
+ * mask that chooses, based on the value at each element, whether the
+ * corresponding element in the output should be taken from input1 (if
+ * true) or input2 (if false).
+ * * 1: An input tensor of the same shape as input0.
+ * * 2: An input tensor of the same shape and type as input1.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input1 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same type and shape as input1 and input2.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ SELECT = @1.2::OperationType:SELECT,
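
SELECT reduces to a single elementwise loop. The sketch below is ours, not part of this patch; it models tensors as flat arrays of equal length, as the shape constraints above require. For TENSOR_QUANT8_ASYMM inputs a real implementation would also requantize, since the output scale and zeroPoint may differ from the inputs'.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Minimal SELECT reference: out[i] = cond[i] ? x[i] : y[i].
    // cond is the TENSOR_BOOL8 mask; x and y must have the same shape,
    // flattened here into equal-length vectors.
    template <typename T>
    std::vector<T> Select(const std::vector<uint8_t>& cond,
                          const std::vector<T>& x, const std::vector<T>& y) {
        assert(cond.size() == x.size() && x.size() == y.size());
        std::vector<T> out(x.size());
        for (size_t i = 0; i < out.size(); ++i) {
            out[i] = cond[i] ? x[i] : y[i];
        }
        return out;
    }
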
+
+ /**
+ * Computes sin of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ SIN = @1.2::OperationType:SIN,
+
+ /**
+ * Extracts a slice of specified size from the input tensor starting at a
+ * specified location.
+ *
+ * The starting location is specified as a 1-D tensor containing offsets
+ * for each dimension. The size is specified as a 1-D tensor containing
+ * either the size of the slice along the corresponding dimension or -1. In
+ * the latter case, all remaining elements in that dimension are included
+ * in the slice.
+ *
+ * The sum of the begin offset and the size of the slice must not exceed
+ * the size of the corresponding dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to take slice from, may be zero-sized.
+ * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+ * the beginning indices of the slice in each dimension.
+ * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+ * the size of the slice in each dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input containing the slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * its scale and zeroPoint have to be the same as the input0 scale and zeroPoint.
+ */
+ SLICE = @1.2::OperationType:SLICE,
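
The subtle parts of SLICE are the -1 sentinel and the bounds rule just described; a minimal shape-resolution sketch (hypothetical helper, not from this patch):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Resolve SLICE sizes: size[i] == -1 means "all remaining elements in
    // dimension i". Rejects slices where begin + size overruns a dimension.
    std::vector<int32_t> ResolveSliceShape(const std::vector<int32_t>& dims,
                                           const std::vector<int32_t>& begin,
                                           const std::vector<int32_t>& size) {
        std::vector<int32_t> out(dims.size());
        for (size_t i = 0; i < dims.size(); ++i) {
            const int32_t s = (size[i] == -1) ? dims[i] - begin[i] : size[i];
            if (begin[i] < 0 || s < 0 || begin[i] + s > dims[i]) {
                throw std::invalid_argument("slice out of range");
            }
            out[i] = s;
        }
        return out;
    }
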
+
+ /**
+ * Splits a tensor along a given axis into num_splits subtensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to split.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis along
+ * which to split.
+ * * 2: An {@link OperandType::INT32} scalar indicating the number of
+ * splits along given axis. Must evenly divide axis size.
+ *
+ * Outputs:
+ * * 0 ~ (num_splits - 1): Resulting subtensors.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SPLIT = @1.2::OperationType:SPLIT,
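
The divisibility requirement pins down each output shape completely; a one-function sketch (names are ours):

    #include <cassert>
    #include <vector>

    // Shape of every SPLIT output: identical to the input except along
    // `axis`, whose extent must divide evenly by numSplits.
    std::vector<int> SplitOutputShape(std::vector<int> dims, int axis, int numSplits) {
        assert(axis >= 0 && axis < static_cast<int>(dims.size()));
        assert(numSplits > 0 && dims[axis] % numSplits == 0);
        dims[axis] /= numSplits;
        return dims;  // all num_splits outputs share this shape
    }
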
+
+ /**
+ * Computes square root of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ SQRT = @1.2::OperationType:SQRT,
+
+ /**
+ * Constructs a tensor by tiling a given tensor.
+ *
+ * This operation creates a new tensor by replicating `input` `multiples`
+ * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
+ * elements, and the values of `input` are replicated `multiples[i]` times
+ * along the i-th dimension.
+ * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}.
+ * The length of multiples must be n.
+ *
+ * Outputs:
+ * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ TILE = @1.2::OperationType:TILE,
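
The per-dimension rule above, out.dims(i) = input.dims(i) * multiples[i], is easiest to see in the 1-D case; an illustrative sketch (not HAL code):

    #include <vector>

    // 1-D TILE: [a b c d] tiled by 2 -> [a b c d a b c d].
    // Higher ranks apply the same replication independently per dimension.
    template <typename T>
    std::vector<T> Tile1D(const std::vector<T>& in, int multiple) {
        std::vector<T> out;
        out.reserve(in.size() * multiple);
        for (int m = 0; m < multiple; ++m) {
            out.insert(out.end(), in.begin(), in.end());
        }
        return out;
    }
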
+
+ /**
+ * Finds values and indices of the k largest entries for the last dimension.
+ *
+ * Resulting values in each dimension are sorted in descending order. If
+ * two values are equal, the one with the larger index appears first.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of
+ * top elements to look for along the last dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input, containing the k
+ * largest elements along each last dimensional slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
+ * containing the indices of values within the last dimension of input.
+ */
+ TOPK_V2 = @1.2::OperationType:TOPK_V2,
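
The tie-break rule (equal values: larger index first) is the only non-obvious detail; a 1-D sketch with our own naming:

    #include <algorithm>
    #include <cstdint>
    #include <numeric>
    #include <utility>
    #include <vector>

    // TOPK_V2 over one last-dimension slice. Values come out in descending
    // order; ties are broken in favor of the larger index, per the doc above.
    // Assumes 0 < k <= v.size().
    template <typename T>
    std::pair<std::vector<T>, std::vector<int32_t>> TopK(const std::vector<T>& v,
                                                         int32_t k) {
        std::vector<int32_t> idx(v.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::sort(idx.begin(), idx.end(), [&](int32_t a, int32_t b) {
            return v[a] != v[b] ? v[a] > v[b] : a > b;
        });
        idx.resize(k);
        std::vector<T> values(k);
        for (int32_t i = 0; i < k; ++i) values[i] = v[idx[i]];
        return {values, idx};
    }
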
+
+ /**
+ * Performs the transpose of the 2-D convolution operation.
+ *
+ * This operation is sometimes called "deconvolution" after Deconvolutional
+ * Networks, but is actually the transpose (gradient) of
+ * {@link OperandType::CONV_2D} rather than an actual deconvolution.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar that has to be one of the
+ * {@link FusedActivationFunc} values, specifying the activation to
+ * invoke on the result.
+ * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
+ * tensor shape.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar that has to be one of the
+ * {@link FusedActivationFunc} values, specifying the activation to
+ * invoke on the result.
+ * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ TRANSPOSE_CONV_2D = @1.2::OperationType:TRANSPOSE_CONV_2D,
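
"The output dimensions are functions of the filter dimensions, stride, and padding" — for explicit padding the standard transposed-convolution shape arithmetic is below. This is textbook math, not text from this patch, so treat it as an assumption:

    // Spatial output extent of a transposed convolution with explicit padding.
    // It inverts the CONV_2D relation
    //     out = (in + padBefore + padAfter - filter) / stride + 1,
    // giving: out = (in - 1) * stride + filter - padBefore - padAfter.
    int TransposeConvOutputSize(int inSize, int filterSize, int stride,
                                int padBefore, int padAfter) {
        return (inSize - 1) * stride + filterSize - padBefore - padAfter;
    }
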
+
+ /**
+ * A recurrent neural network specified by an LSTM cell.
+ *
+ * Performs (fully) dynamic unrolling of input.
+ *
+ * This Op unrolls the input along the time dimension, and implements the
+ * following operation for each element in the sequence
+ * s = 1...sequence_length:
+ * outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
+ *
+ * Where LSTMOp is the LSTM op as in {@link OperandType::LSTM},
+ * the "projection" is an optional projection layer from state and output
+ * and the “activation” is the function passed as the
+ * “fused_activation_function” argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where “max_time” is the number of timesteps (sequence length),
+ * “batch_size” corresponds to the batching dimension, and
+ * “input_size” is the size of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * * 23:Time-major if true, batch-major if false.
+ * * 24:The input layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 25:The forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 26:The cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 27:The output layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The output (\f$o_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, output_size]
+ * If batch-major: [batch_size, max_time, output_size]
+ */
+ UNIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_LSTM,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs.
+ *
+ * This layer unrolls the input along the sequence dimension, and implements
+ * the following operation
+ * for each element in the sequence s = 1...sequence_length:
+ * outputs[s] = state = activation(inputs[s] * input_weights’ + state *
+ * recurrent_weights’ + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to 1, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: weights.
+ * A 2-D tensor of shape [numUnits, inputSize].
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [numUnits, numUnits].
+ * * 3: bias.
+ * A 1-D tensor of shape [numUnits].
+ * * 4: hidden state
+ * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: fusedActivationFunction.
+ * A {@link FusedActivationFunc} value indicating the activation function. If
+ * “NONE” is specified then it results in a linear activation.
+ * * 6: timeMajor
+ * An {@link OperandType::INT32} scalar specifying the shape format
+ * of input and output tensors. Must be set to either 0 or 1.
+ * Outputs:
+ * * 0: output.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to 1, then the output has a shape [maxTime, batchSize,
+ * numUnits], otherwise the output has a shape [batchSize, maxTime,
+ * numUnits].
+ */
+ UNIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_RNN,
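
The recurrence outputs[s] = state = activation(inputs[s] * W' + state * R' + bias) maps directly onto a loop over time steps; here is a batchSize == 1 float32 sketch with our own names (illustrative only):

    #include <vector>

    // One RNN step for batchSize == 1. W is [numUnits x inputSize] and
    // R is [numUnits x numUnits], both row-major; the returned vector is the
    // step's output and the next hidden state.
    std::vector<float> RnnStep(const std::vector<float>& x,      // [inputSize]
                               const std::vector<float>& state,  // [numUnits]
                               const std::vector<float>& W,
                               const std::vector<float>& R,
                               const std::vector<float>& bias,
                               float (*activation)(float)) {
        const size_t numUnits = bias.size(), inputSize = x.size();
        std::vector<float> next(numUnits);
        for (size_t u = 0; u < numUnits; ++u) {
            float acc = bias[u];
            for (size_t i = 0; i < inputSize; ++i) acc += W[u * inputSize + i] * x[i];
            for (size_t j = 0; j < numUnits; ++j) acc += R[u * numUnits + j] * state[j];
            next[u] = activation(acc);
        }
        return next;  // caller records next as outputs[s], then feeds it back in
    }
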
+
+ /**
+ * Resizes images to the given size using nearest-neighbor interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio is not the
+ * same as the input aspect ratio. The corner pixels of the output may not
+ * be the same as the corner pixels of the input.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the width of
+ * the output tensor.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the height of
+ * the output tensor.
+ * * 3: An {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Inputs (resizing by scale):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,
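
Resizing by scale only fixes the output extent (new_dim = floor(dim * scale)); the per-pixel source lookup below is one common nearest-neighbor convention, offered as an assumption since the patch does not specify it:

    #include <algorithm>
    #include <cmath>

    // Map an output coordinate back to a clamped input coordinate on one axis.
    int NearestSourceIndex(int outIndex, int inSize, int outSize) {
        const float scale = static_cast<float>(inSize) / static_cast<float>(outSize);
        const int src = static_cast<int>(std::floor(outIndex * scale));
        return std::min(src, inSize - 1);  // guard against rounding past the edge
    }
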
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+ FUNDAMENTAL_MAX = 94,
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
+
/**
* The capabilities of a driver.
@@ -109,6 +4603,32 @@
};
/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
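
The new wording on Operation::type means any value above BASE_MAX must be decoded as an extension type. Per the 1.2 ExtensionTypeEncoding convention (high 16 bits select the Model::extensionNameToPrefix entry, low 16 bits identify the type within the extension), a decoding sketch:

    #include <cstdint>

    // Split an extension operation type into its prefix and
    // within-extension parts (assumes type > OperationTypeRange::BASE_MAX).
    struct DecodedExtensionType {
        uint16_t prefix;               // matches ExtensionNameAndPrefix::prefix
        uint16_t typeWithinExtension;  // meaning defined by the extension
    };

    DecodedExtensionType DecodeExtensionType(uint32_t operationType) {
        return {static_cast<uint16_t>(operationType >> 16),
                static_cast<uint16_t>(operationType & 0xFFFF)};
    }
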
+
+/**
* Describes one operand of the model's graph.
*/
struct Operand {
@@ -233,28 +4753,6 @@
};
/**
- * Describes one operation of the model's graph.
- */
-struct Operation {
- /**
- * The operation type.
- */
- OperationType type;
-
- /**
- * Describes the table that contains the indexes of the inputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> inputs;
-
- /**
- * Describes the table that contains the indexes of the outputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> outputs;
-};
-
-/**
* A Neural Network Model.
*
* This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index d41cfd2..e06f5d6 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -44,6 +44,47 @@
BASE_MAX = 0xFFFF,
};
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+%insert Operation_1.0
+
+%insert Operation_1.1
+
+%insert Operation_1.2
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operation_1.3_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
+
/**
* The capabilities of a driver.
@@ -80,6 +121,32 @@
};
/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
* Describes one operand of the model's graph.
*/
struct Operand {
@@ -204,28 +271,6 @@
};
/**
- * Describes one operation of the model's graph.
- */
-struct Operation {
- /**
- * The operation type.
- */
- OperationType type;
-
- /**
- * Describes the table that contains the indexes of the inputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> inputs;
-
- /**
- * Describes the table that contains the indexes of the outputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> outputs;
-};
-
-/**
* A Neural Network Model.
*
* This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/vts/functional/Callbacks.cpp b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
index 435db46..4f08e72 100644
--- a/neuralnetworks/1.3/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
@@ -54,7 +54,7 @@
}
Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
- const sp<V1_2::IPreparedModel>& preparedModel) {
+ const sp<V1_3::IPreparedModel>& preparedModel) {
return notify(errorStatus, preparedModel);
}
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index ea2398b..d8a7534 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -52,7 +52,6 @@
using V1_0::ErrorStatus;
using V1_1::ExecutionPreference;
using V1_2::Constant;
-using V1_2::IPreparedModel;
using V1_2::OperationType;
namespace float32_model {
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 4f9b6f9..2ec2988 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -29,6 +29,7 @@
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
@@ -61,17 +62,27 @@
using V1_0::Request;
using V1_1::ExecutionPreference;
using V1_2::Constant;
-using V1_2::IPreparedModel;
using V1_2::MeasureTiming;
-using V1_2::OperationType;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
using V1_2::implementation::ExecutionCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+namespace {
+
+enum class Executor { ASYNC, SYNC, BURST };
+
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+struct TestConfig {
+ Executor executor;
+ MeasureTiming measureTiming;
+ OutputType outputType;
+};
+
+} // namespace
+
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
@@ -181,7 +192,7 @@
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
- return preparedModel->execute_1_2(request, measure, callback);
+ return preparedModel->execute_1_3(request, measure, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
@@ -206,31 +217,31 @@
return android::nn::ExecutionBurstController::create(preparedModel,
std::chrono::microseconds{0});
}
-enum class Executor { ASYNC, SYNC, BURST };
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- Executor executor, MeasureTiming measure, OutputType outputType) {
+ const TestConfig& testConfig) {
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
- if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT &&
+ !isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
Request request = createRequest(testModel);
- if (outputType == OutputType::INSUFFICIENT) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
- switch (executor) {
+ switch (testConfig.executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- Return<ErrorStatus> executionLaunchStatus =
- ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@@ -246,8 +257,8 @@
SCOPED_TRACE("synchronous");
// execute
- Return<ErrorStatus> executionReturnStatus =
- ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+ Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
@@ -270,14 +281,14 @@
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
- controller->compute(request, measure, keys);
+ controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
}
- if (outputType != OutputType::FULLY_SPECIFIED &&
+ if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
@@ -286,7 +297,7 @@
<< std::endl;
GTEST_SKIP();
}
- if (measure == MeasureTiming::NO) {
+ if (testConfig.measureTiming == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
@@ -295,7 +306,7 @@
}
}
- switch (outputType) {
+ switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be either
// either empty, or have the same number of elements as the number of outputs.
@@ -333,44 +344,29 @@
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
bool testDynamicOutputShape) {
+ std::initializer_list<OutputType> outputTypesList;
+ std::initializer_list<MeasureTiming> measureTimingList;
+ std::initializer_list<Executor> executorList;
+
if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
+ outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
} else {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
+ outputTypesList = {OutputType::FULLY_SPECIFIED};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ }
+
+ for (const OutputType outputType : outputTypesList) {
+ for (const MeasureTiming measureTiming : measureTimingList) {
+ for (const Executor executor : executorList) {
+ const TestConfig testConfig = {.executor = executor,
+ .measureTiming = measureTiming,
+ .outputType = outputType};
+ EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ }
+ }
}
}
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index b9277cf..45cff5b 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -17,8 +17,8 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
-#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <functional>
#include <vector>
@@ -55,10 +55,9 @@
Model createModel(const test_helper::TestModel& testModel);
-void PrepareModel(const sp<IDevice>& device, const Model& model,
- sp<V1_2::IPreparedModel>* preparedModel);
+void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
-void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
const test_helper::TestModel& testModel, bool testDynamicOutputShape);
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
index 7361078..a7569e6 100644
--- a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
+++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
@@ -25,8 +25,6 @@
#define CHECK_TEST_ENUM(EnumType, enumValue) \
static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
-using V1_2::OperationType;
-
CHECK_TEST_ENUM(OperandType, FLOAT32);
CHECK_TEST_ENUM(OperandType, INT32);
CHECK_TEST_ENUM(OperandType, UINT32);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
index 2c97294..7df8046 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
@@ -40,7 +40,6 @@
using V1_2::FmqResultDatum;
using V1_2::IBurstCallback;
using V1_2::IBurstContext;
-using V1_2::IPreparedModel;
using V1_2::MeasureTiming;
using V1_2::Timing;
using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index bdda790..46bbd3f 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -27,8 +27,6 @@
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
-using V1_2::IPreparedModel;
-using V1_2::OperationType;
using V1_2::OperationTypeRange;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
@@ -61,7 +59,7 @@
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
- sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
+ sp<IPreparedModel> preparedModel = getPreparedModel_1_3(preparedModelCallback);
ASSERT_EQ(nullptr, preparedModel.get());
}
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index c00512c..2cf30d5 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -29,7 +29,6 @@
using V1_0::ErrorStatus;
using V1_0::Request;
-using V1_2::IPreparedModel;
using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::Timing;
@@ -61,11 +60,11 @@
// asynchronous
{
- SCOPED_TRACE(message + " [execute_1_2]");
+ SCOPED_TRACE(message + " [execute_1_3]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executeLaunchStatus =
- preparedModel->execute_1_2(request, measure, executionCallback);
+ preparedModel->execute_1_3(request, measure, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index da5d95b..625913d 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -34,7 +34,6 @@
using V1_0::ErrorStatus;
using V1_0::Request;
using V1_1::ExecutionPreference;
-using V1_2::IPreparedModel;
// internal helper function
void createPreparedModel(const sp<IDevice>& device, const Model& model,
@@ -64,7 +63,7 @@
// retrieve prepared model
preparedModelCallback->wait();
const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- *preparedModel = getPreparedModel_1_2(preparedModelCallback);
+ *preparedModel = getPreparedModel_1_3(preparedModelCallback);
// The getSupportedOperations_1_3 call returns a list of operations that are
// guaranteed not to fail if prepareModel_1_3 is called, and
@@ -165,7 +164,7 @@
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
-sp<IPreparedModel> getPreparedModel_1_2(const sp<PreparedModelCallback>& callback) {
+sp<IPreparedModel> getPreparedModel_1_3(const sp<PreparedModelCallback>& callback) {
sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
}
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index 9ccc911..8cb42d4 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -17,8 +17,8 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
-#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <gtest/gtest.h>
#include "1.0/Utils.h"
@@ -47,11 +47,10 @@
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
- sp<V1_2::IPreparedModel>* preparedModel);
+ sp<IPreparedModel>* preparedModel);
-// Utility function to get PreparedModel from callback and downcast to V1_2.
+// Utility function to get PreparedModel from callback and downcast to V1_3.
-sp<V1_2::IPreparedModel> getPreparedModel_1_2(
- const sp<implementation::PreparedModelCallback>& callback);
+sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
index 9376a92..fb19a84 100644
--- a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
@@ -137,7 +137,7 @@
* nullptr if the model was unable to be prepared.
*/
Return<void> notify_1_3(V1_0::ErrorStatus status,
- const sp<V1_2::IPreparedModel>& preparedModel) override;
+ const sp<V1_3::IPreparedModel>& preparedModel) override;
/**
* PreparedModelCallback::wait blocks until notify* has been called on the
diff --git a/radio/1.5/IRadio.hal b/radio/1.5/IRadio.hal
index de20dd0..74ec56d 100644
--- a/radio/1.5/IRadio.hal
+++ b/radio/1.5/IRadio.hal
@@ -17,6 +17,8 @@
package android.hardware.radio@1.5;
import @1.4::IRadio;
+import @1.5::AccessNetwork;
+import @1.5::SignalThresholdInfo;
/**
* This interface is used by telephony and telecom to talk to cellular radio.
@@ -27,4 +29,30 @@
* setResponseFunctions must work with @1.5::IRadioResponse and @1.5::IRadioIndication.
*/
interface IRadio extends @1.4::IRadio {
+
+ /**
+ * Sets the signal strength reporting criteria.
+ *
+ * The resulting reporting rules are the AND of all the supplied criteria. For each RAN,
+ * the hysteresisDb and thresholds apply only to the following measured quantities:
+ * -GERAN - RSSI
+ * -CDMA2000 - RSSI
+ * -UTRAN - RSCP
+ * -EUTRAN - RSRP/RSRQ/RSSNR
+ * -NGRAN - SSRSRP/SSRSRQ/SSSINR
+ *
+ * Note: Reporting criteria must be individually set for each RAN. For any unset reporting
+ * criteria, the value is implementation-defined.
+ *
+ * Response callback is
+ * IRadioResponse.setSignalStrengthReportingCriteriaResponse_1_5()
+ *
+ * @param serial Serial number of request.
+ * @param signalThresholdInfo Signal threshold info including the threshold values,
+ * hysteresisDb, and hysteresisMs. See @1.5::SignalThresholdInfo
+ * for details.
+ * @param accessNetwork The type of network for which to apply these thresholds.
+ */
+ oneway setSignalStrengthReportingCriteria_1_5(int32_t serial,
+ SignalThresholdInfo signalThresholdInfo, AccessNetwork accessNetwork);
};
diff --git a/radio/1.5/IRadioResponse.hal b/radio/1.5/IRadioResponse.hal
index d4c4f76..91dc1e0 100644
--- a/radio/1.5/IRadioResponse.hal
+++ b/radio/1.5/IRadioResponse.hal
@@ -23,4 +23,13 @@
* Interface declaring response functions to solicited radio requests.
*/
interface IRadioResponse extends @1.4::IRadioResponse {
+ /**
+ * @param info Response info struct containing response type, serial no. and error
+ *
+ * Valid errors returned:
+ * RadioError:NONE
+ * RadioError:INVALID_ARGUMENTS
+ * RadioError:RADIO_NOT_AVAILABLE
+ */
+ oneway setSignalStrengthReportingCriteriaResponse_1_5(RadioResponseInfo info);
};
diff --git a/radio/1.5/types.hal b/radio/1.5/types.hal
index a639a8d..9ef1d31 100644
--- a/radio/1.5/types.hal
+++ b/radio/1.5/types.hal
@@ -15,3 +15,102 @@
*/
package android.hardware.radio@1.5;
+
+import @1.4::AccessNetwork;
+
+/**
+ * Defines the signal measurement type.
+ */
+enum SignalMeasurementType : int32_t {
+ /**
+ * Received Signal Strength Indication.
+ * Range: -113 dBm to -51 dBm
+ * Used RAN: GERAN, CDMA2000
+ * Reference: 3GPP TS 27.007 section 8.5.
+ */
+ RSSI = 1,
+ /**
+ * Received Signal Code Power.
+ * Range: -120 dBm to -25 dBm;
+ * Used RAN: UTRAN
+ * Reference: 3GPP TS 25.123, section 9.1.1.1
+ */
+ RSCP = 2,
+ /**
+ * Reference Signal Received Power.
+ * Range: -140 dBm to -44 dBm;
+ * Used RAN: EUTRAN
+ * Reference: 3GPP TS 36.133 9.1.4
+ */
+ RSRP = 3,
+ /**
+ * Reference Signal Received Quality.
+ * Range: -20 dB to -3 dB;
+ * Used RAN: EUTRAN
+ * Reference: 3GPP TS 36.133 9.1.7
+ */
+ RSRQ = 4,
+ /**
+ * Reference Signal Signal-to-Noise Ratio.
+ * Range: -10 dB to 30 dB;
+ * Used RAN: EUTRAN
+ * Note: this field is optional; whether to support it is decided by the
+ * vendor. Though the response code is not enforced, the vendor's
+ * implementation must ensure this interface does not crash.
+ */
+ RSSNR = 5,
+ /**
+ * 5G SS reference signal received power.
+ * Range: -140 dBm to -44 dBm.
+ * Used RAN: NGRAN
+ * Reference: 3GPP TS 38.215.
+ */
+ SSRSRP = 6,
+ /**
+ * 5G SS reference signal received quality.
+ * Range: -20 dB to -3 dB.
+ * Used RAN: NGRAN
+ * Reference: 3GPP TS 38.215.
+ */
+ SSRSRQ = 7,
+ /**
+ * 5G SS signal-to-noise and interference ratio.
+ * Range: -23 dB to 40 dB
+ * Used RAN: NGRAN
+ * Reference: 3GPP TS 38.215 section 5.1.*, 3GPP TS 38.133 section 10.1.16.1.
+ */
+ SSSINR = 8,
+};
+
+/**
+ * Contains the threshold values of each signal measurement type.
+ */
+struct SignalThresholdInfo {
+ /** Signal Measurement Type */
+ SignalMeasurementType signalMeasurement;
+
+ /** A hysteresis time in milliseconds to prevent flapping. A value of 0 disables hysteresis. */
+ int32_t hysteresisMs;
+
+ /**
+ * An interval in dB defining the required magnitude change between reports.
+ * hysteresisDb must be smaller than the smallest threshold delta.
+ * An interval value of 0 disables hysteresis.
+ */
+ int32_t hysteresisDb;
+
+ /**
+ * List of threshold values for which to apply the criteria.
+ * The range and unit of each value must match the specific
+ * @1.5::SignalMeasurementType.
+ * A vector of size 0 disables the use of thresholds for reporting.
+ */
+ vec<int32_t> thresholds;
+};
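
The "smaller than the smallest threshold delta" rule on hysteresisDb is exactly what the invalid-argument VTS test below exercises: with thresholds {-109, -103, -97, -89} the smallest delta is 6, so hysteresisDb = 10 must be rejected while 2 passes. A minimal validity check might look like this (our sketch, not HAL code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // hysteresisDb must be smaller than the smallest gap between consecutive
    // thresholds. 0 disables hysteresis; fewer than two thresholds leave no
    // delta to violate.
    bool IsHysteresisDbValid(int32_t hysteresisDb, std::vector<int32_t> thresholds) {
        if (hysteresisDb == 0 || thresholds.size() < 2) return true;
        std::sort(thresholds.begin(), thresholds.end());  // sorts a local copy
        int32_t minDelta = INT32_MAX;
        for (size_t i = 1; i < thresholds.size(); ++i) {
            minDelta = std::min(minDelta, thresholds[i] - thresholds[i - 1]);
        }
        return hysteresisDb < minDelta;
    }
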
+
+enum AccessNetwork : @1.4::AccessNetwork {
+ /**
+ * Next-Generation Radio Access Network (NGRAN)
+ */
+ NGRAN = 6,
+};
diff --git a/radio/1.5/vts/functional/radio_hidl_hal_api.cpp b/radio/1.5/vts/functional/radio_hidl_hal_api.cpp
index b86fa5f..d173411 100644
--- a/radio/1.5/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.5/vts/functional/radio_hidl_hal_api.cpp
@@ -17,3 +17,262 @@
#include <radio_hidl_hal_utils_v1_5.h>
#define ASSERT_OK(ret) ASSERT_TRUE(ret.isOk())
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() with invalid hysteresisDb
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_invalidHysteresisDb) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSSI;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 10; // hysteresisDb too large given threshold list deltas
+ signalThresholdInfo.thresholds = {-109, -103, -97, -89};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::GERAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_invalidHysteresisDb, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() with empty thresholds
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_EmptyThresholds) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSSI;
+ signalThresholdInfo.hysteresisMs = 0;
+ signalThresholdInfo.hysteresisDb = 0;
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::GERAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_EmptyParams, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for GERAN
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_Geran) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSSI;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 2;
+ signalThresholdInfo.thresholds = {-109, -103, -97, -89};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::GERAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_Geran, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for UTRAN
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_Utran) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSCP;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 2;
+ signalThresholdInfo.thresholds = {-110, -97, -73, -49, -25};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::UTRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_Utran, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for EUTRAN
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_Eutran_RSRP) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSRP;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 2;
+ signalThresholdInfo.thresholds = {-128, -108, -88, -68};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::EUTRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_Eutran, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for EUTRAN
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_Eutran_RSRQ) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSRQ;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 2;
+ signalThresholdInfo.thresholds = {-27, -20, -13, -6};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::EUTRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_Eutran, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for EUTRAN
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_Eutran_RSSNR) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSSNR;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 2;
+ signalThresholdInfo.thresholds = {-10, 0, 10, 20};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::EUTRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for CDMA2000
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_Cdma2000) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::RSSI;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 2;
+ signalThresholdInfo.thresholds = {-105, -90, -75, -65};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::CDMA2000);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_Cdma2000, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for NGRAN_SSRSRP
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_NGRAN_SSRSRP) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::SSRSRP;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 0;
+ signalThresholdInfo.thresholds = {-105, -90, -75, -65};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::NGRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_NGRAN_SSRSRP, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for NGRAN_SSRSRQ
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_NGRAN_SSRSRQ) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::SSRSRQ;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 0;
+ signalThresholdInfo.thresholds = {-15, -10, -5, -4};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::NGRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_NGRAN_SSRSRQ, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
+
+/*
+ * Test IRadio.setSignalStrengthReportingCriteria_1_5() for NGRAN_SSSINR
+ */
+TEST_F(RadioHidlTest_v1_5, setSignalStrengthReportingCriteria_1_5_NGRAN_SSSINR) {
+ serial = GetRandomSerialNumber();
+
+ ::android::hardware::radio::V1_5::SignalThresholdInfo signalThresholdInfo;
+ signalThresholdInfo.signalMeasurement = SignalMeasurementType::SSSINR;
+ signalThresholdInfo.hysteresisMs = 5000;
+ signalThresholdInfo.hysteresisDb = 0;
+ signalThresholdInfo.thresholds = {-10, 3, 16, 18};
+
+ Return<void> res = radio_v1_5->setSignalStrengthReportingCriteria_1_5(
+ serial, signalThresholdInfo, ::android::hardware::radio::V1_5::AccessNetwork::NGRAN);
+ ASSERT_OK(res);
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_5->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_5->rspInfo.serial);
+
+ ALOGI("setSignalStrengthReportingCriteria_1_5_NGRAN_SSSINR, rspInfo.error = %s\n",
+ toString(radioRsp_v1_5->rspInfo.error).c_str());
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_5->rspInfo.error, {RadioError::NONE}));
+}
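Each of the tests above funnels its verdict through CheckAnyOfErrors, which passes as long as the reported error is in an allowed set. A minimal sketch of how such a helper behaves follows; it is an illustration only, since the real helper lives in the shared radio VTS utilities and may take extra arguments (for example, whether OEM errors are acceptable):

    // Hedged sketch, not the actual VTS helper.
    #include <vector>

    template <typename T>
    bool CheckAnyOfErrorsSketch(T reported, const std::vector<T>& allowed) {
        for (const T& candidate : allowed) {
            if (candidate == reported) return true;  // allowed error: test passes
        }
        return false;  // unexpected error: the surrounding ASSERT_TRUE fails
    }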
diff --git a/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h b/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h
index 799702b..683fdfc 100644
--- a/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h
+++ b/radio/1.5/vts/functional/radio_hidl_hal_utils_v1_5.h
@@ -521,6 +521,9 @@
Return<void> getAllowedCarriersResponse_1_4(const RadioResponseInfo& info,
const CarrierRestrictionsWithPriority& carriers,
SimLockMultiSimPolicy multiSimPolicy);
+
+ /* 1.5 API */
+ Return<void> setSignalStrengthReportingCriteriaResponse_1_5(const RadioResponseInfo& info);
};
/* Callback class for radio indication */
diff --git a/radio/1.5/vts/functional/radio_response.cpp b/radio/1.5/vts/functional/radio_response.cpp
index 1e5cc47..29a9250 100644
--- a/radio/1.5/vts/functional/radio_response.cpp
+++ b/radio/1.5/vts/functional/radio_response.cpp
@@ -885,3 +885,11 @@
parent_v1_5.notify(info.serial);
return Void();
}
+
+/* 1.5 APIs */
+Return<void> RadioResponse_v1_5::setSignalStrengthReportingCriteriaResponse_1_5(
+ const RadioResponseInfo& info) {
+ rspInfo = info;
+ parent_v1_5.notify(info.serial);
+ return Void();
+}
\ No newline at end of file
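The response stub above completes the asynchronous handshake every radio test in this change depends on: the test thread blocks in wait() until the response callback records rspInfo and calls notify() with the request serial. A minimal sketch of that mechanism, with all names assumed rather than taken from the VTS base classes:

    // Hedged sketch of the wait()/notify() pairing (assumed names).
    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    class ResponseWaiter {
      public:
        // Invoked from the HIDL response callback thread.
        void notify() {
            std::lock_guard<std::mutex> lock(mMutex);
            mNotified = true;
            mCv.notify_one();
        }
        // Invoked from the test thread; mirrors EXPECT_EQ(no_timeout, wait()).
        std::cv_status wait() {
            std::unique_lock<std::mutex> lock(mMutex);
            while (!mNotified) {
                if (mCv.wait_for(lock, std::chrono::seconds(1)) ==
                    std::cv_status::timeout) {
                    return std::cv_status::timeout;
                }
            }
            mNotified = false;
            return std::cv_status::no_timeout;
        }

      private:
        std::mutex mMutex;
        std::condition_variable mCv;
        bool mNotified = false;
    };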
diff --git a/sensors/common/vts/utils/GrallocWrapper.cpp b/sensors/common/vts/utils/GrallocWrapper.cpp
index 1cad913..e63faa2 100644
--- a/sensors/common/vts/utils/GrallocWrapper.cpp
+++ b/sensors/common/vts/utils/GrallocWrapper.cpp
@@ -147,8 +147,8 @@
.width = size,
.height = 1,
.layerCount = 1,
- .usage = kBufferUsage,
.format = static_cast<decltype(descriptorInfo.format)>(PixelFormat::BLOB),
+ .usage = kBufferUsage,
};
BufferDescriptor descriptor;
diff --git a/soundtrigger/2.0/vts/functional/Android.bp b/soundtrigger/2.0/vts/functional/Android.bp
index f6207c4..13dcdec 100644
--- a/soundtrigger/2.0/vts/functional/Android.bp
+++ b/soundtrigger/2.0/vts/functional/Android.bp
@@ -19,5 +19,5 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalSoundtriggerV2_0TargetTest.cpp"],
static_libs: ["android.hardware.soundtrigger@2.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/soundtrigger/2.0/vts/functional/VtsHalSoundtriggerV2_0TargetTest.cpp b/soundtrigger/2.0/vts/functional/VtsHalSoundtriggerV2_0TargetTest.cpp
index 59ac13e..d7a7d08 100644
--- a/soundtrigger/2.0/vts/functional/VtsHalSoundtriggerV2_0TargetTest.cpp
+++ b/soundtrigger/2.0/vts/functional/VtsHalSoundtriggerV2_0TargetTest.cpp
@@ -23,15 +23,15 @@
#include <android/log.h>
#include <cutils/native_handle.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <log/log.h>
#include <android/hardware/audio/common/2.0/types.h>
#include <android/hardware/soundtrigger/2.0/ISoundTriggerHw.h>
#include <android/hardware/soundtrigger/2.0/types.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
#define SHORT_TIMEOUT_PERIOD (1)
using ::android::hardware::audio::common::V2_0::AudioDevice;
@@ -86,27 +86,11 @@
int mCount;
};
-// Test environment for SoundTrigger HIDL HAL.
-class SoundTriggerHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static SoundTriggerHidlEnvironment* Instance() {
- static SoundTriggerHidlEnvironment* instance = new SoundTriggerHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<ISoundTriggerHw>(); }
-
- private:
- SoundTriggerHidlEnvironment() {}
-};
-
// The main test class for Sound Trigger HIDL HAL.
-class SoundTriggerHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SoundTriggerHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- mSoundTriggerHal = ::testing::VtsHalHidlTargetTestBase::getService<ISoundTriggerHw>(
- SoundTriggerHidlEnvironment::Instance()->getServiceName<ISoundTriggerHw>());
+ mSoundTriggerHal = ISoundTriggerHw::getService(GetParam());
ASSERT_NE(nullptr, mSoundTriggerHal.get());
mCallback = new SoundTriggerHwCallback(*this);
ASSERT_NE(nullptr, mCallback.get());
@@ -167,7 +151,7 @@
* - the implementation supports at least one sound model and one key phrase
* - the implementation supports at least VOICE_TRIGGER recognition mode
*/
-TEST_F(SoundTriggerHidlTest, GetProperties) {
+TEST_P(SoundTriggerHidlTest, GetProperties) {
ISoundTriggerHw::Properties halProperties;
Return<void> hidlReturn;
int ret = -ENODEV;
@@ -194,7 +178,7 @@
* There is no way to verify that implementation actually can load a sound model because each
* sound model is vendor specific.
*/
-TEST_F(SoundTriggerHidlTest, LoadInvalidModelFail) {
+TEST_P(SoundTriggerHidlTest, LoadInvalidModelFail) {
Return<void> hidlReturn;
int ret = -ENODEV;
ISoundTriggerHw::PhraseSoundModel model;
@@ -220,7 +204,7 @@
* Verifies that:
* - the implementation returns error when passed a sound model with random data.
*/
-TEST_F(SoundTriggerHidlTest, LoadGenericSoundModelFail) {
+TEST_P(SoundTriggerHidlTest, LoadGenericSoundModelFail) {
int ret = -ENODEV;
ISoundTriggerHw::SoundModel model;
SoundModelHandle handle = 0;
@@ -251,7 +235,7 @@
* - the implementation returns an error when called without a valid loaded sound model
*
*/
-TEST_F(SoundTriggerHidlTest, UnloadModelNoModelFail) {
+TEST_P(SoundTriggerHidlTest, UnloadModelNoModelFail) {
Return<int32_t> hidlReturn(0);
SoundModelHandle halHandle = 0;
@@ -271,7 +255,7 @@
* There is no way to verify that implementation actually starts recognition because no model can
* be loaded.
*/
-TEST_F(SoundTriggerHidlTest, StartRecognitionNoModelFail) {
+TEST_P(SoundTriggerHidlTest, StartRecognitionNoModelFail) {
Return<int32_t> hidlReturn(0);
SoundModelHandle handle = 0;
PhraseRecognitionExtra phrase;
@@ -299,7 +283,7 @@
* - the implementation returns an error when called without an active recognition running
*
*/
-TEST_F(SoundTriggerHidlTest, StopRecognitionNoAStartFail) {
+TEST_P(SoundTriggerHidlTest, StopRecognitionNoAStartFail) {
Return<int32_t> hidlReturn(0);
SoundModelHandle handle = 0;
@@ -316,7 +300,7 @@
* - the implementation implements this optional method or indicates it is not supported by
* returning -ENOSYS
*/
-TEST_F(SoundTriggerHidlTest, stopAllRecognitions) {
+TEST_P(SoundTriggerHidlTest, stopAllRecognitions) {
Return<int32_t> hidlReturn(0);
hidlReturn = mSoundTriggerHal->stopAllRecognitions();
@@ -325,11 +309,7 @@
EXPECT_TRUE(hidlReturn == 0 || hidlReturn == -ENOSYS);
}
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(SoundTriggerHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- SoundTriggerHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, SoundTriggerHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(ISoundTriggerHw::descriptor)),
+ android::hardware::PrintInstanceNameToString);
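The edit above is the template for every suite in this change: the hand-rolled test environment and main() are dropped, the fixture derives from ::testing::TestWithParam<std::string>, each TEST_F becomes TEST_P, and INSTANTIATE_TEST_SUITE_P enumerates every registered instance of the HAL so the suite runs once per instance. A self-contained sketch of the pattern, where IExampleHal and its descriptor string are assumptions for illustration only:

    // Hedged sketch of the per-instance parameterization pattern.
    #include <gtest/gtest.h>
    #include <hidl/GtestPrinter.h>
    #include <hidl/ServiceManagement.h>

    class ExampleHidlTest : public ::testing::TestWithParam<std::string> {
      protected:
        void SetUp() override {
            // With a real HAL: mHal = IExampleHal::getService(GetParam());
            instance_name_ = GetParam();
        }
        std::string instance_name_;
    };

    TEST_P(ExampleHidlTest, InstanceNameIsNonEmpty) {
        EXPECT_FALSE(instance_name_.empty());
    }

    INSTANTIATE_TEST_SUITE_P(
        PerInstance, ExampleHidlTest,
        testing::ValuesIn(android::hardware::getAllHalInstanceNames(
            "android.hardware.example@1.0::IExampleHal")),  // assumed descriptor
        android::hardware::PrintInstanceNameToString);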
diff --git a/soundtrigger/2.1/vts/functional/Android.bp b/soundtrigger/2.1/vts/functional/Android.bp
index f1eb35d..7830fe2 100644
--- a/soundtrigger/2.1/vts/functional/Android.bp
+++ b/soundtrigger/2.1/vts/functional/Android.bp
@@ -25,5 +25,5 @@
"android.hardware.soundtrigger@2.1",
"libhidlmemory"
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/soundtrigger/2.1/vts/functional/VtsHalSoundtriggerV2_1TargetTest.cpp b/soundtrigger/2.1/vts/functional/VtsHalSoundtriggerV2_1TargetTest.cpp
index 0a2eeac..7f06ed9 100644
--- a/soundtrigger/2.1/vts/functional/VtsHalSoundtriggerV2_1TargetTest.cpp
+++ b/soundtrigger/2.1/vts/functional/VtsHalSoundtriggerV2_1TargetTest.cpp
@@ -23,6 +23,9 @@
#include <android/log.h>
#include <cutils/native_handle.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <log/log.h>
#include <android/hardware/audio/common/2.0/types.h>
@@ -32,9 +35,6 @@
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <hidlmemory/mapping.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
#define SHORT_TIMEOUT_PERIOD (1)
using ::android::sp;
@@ -94,27 +94,11 @@
int mCount;
};
-// Test environment for SoundTrigger HIDL HAL.
-class SoundTriggerHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static SoundTriggerHidlEnvironment* Instance() {
- static SoundTriggerHidlEnvironment* instance = new SoundTriggerHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<ISoundTriggerHw>(); }
-
- private:
- SoundTriggerHidlEnvironment() {}
-};
-
// The main test class for Sound Trigger HIDL HAL.
-class SoundTriggerHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SoundTriggerHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- mSoundTriggerHal = ::testing::VtsHalHidlTargetTestBase::getService<ISoundTriggerHw>(
- SoundTriggerHidlEnvironment::Instance()->getServiceName<ISoundTriggerHw>());
+ mSoundTriggerHal = ISoundTriggerHw::getService(GetParam());
ASSERT_NE(nullptr, mSoundTriggerHal.get());
mCallback = new SoundTriggerHwCallback(*this);
ASSERT_NE(nullptr, mCallback.get());
@@ -196,7 +180,7 @@
* - the implementation supports at least one sound model and one key phrase
* - the implementation supports at least VOICE_TRIGGER recognition mode
*/
-TEST_F(SoundTriggerHidlTest, GetProperties) {
+TEST_P(SoundTriggerHidlTest, GetProperties) {
ISoundTriggerHw::Properties halProperties;
Return<void> hidlReturn;
int ret = -ENODEV;
@@ -223,7 +207,7 @@
* There is no way to verify that implementation actually can load a sound model because each
* sound model is vendor specific.
*/
-TEST_F(SoundTriggerHidlTest, LoadInvalidModelFail) {
+TEST_P(SoundTriggerHidlTest, LoadInvalidModelFail) {
Return<void> hidlReturn;
int ret = -ENODEV;
V2_0_ISoundTriggerHw::PhraseSoundModel model;
@@ -252,7 +236,7 @@
* There is no way to verify that implementation actually can load a sound model because each
* sound model is vendor specific.
*/
-TEST_F(SoundTriggerHidlTest, LoadInvalidModelFail_2_1) {
+TEST_P(SoundTriggerHidlTest, LoadInvalidModelFail_2_1) {
Return<void> hidlReturn;
int ret = -ENODEV;
ISoundTriggerHw::PhraseSoundModel model;
@@ -277,7 +261,7 @@
* Verifies that:
* - the implementation returns an error when passed an empty sound model
*/
-TEST_F(SoundTriggerHidlTest, LoadEmptyGenericSoundModelFail) {
+TEST_P(SoundTriggerHidlTest, LoadEmptyGenericSoundModelFail) {
int ret = -ENODEV;
V2_0_ISoundTriggerHw::SoundModel model;
SoundModelHandle handle = 0;
@@ -301,7 +285,7 @@
* Verifies that:
* - the implementation returns error when passed a sound model with random data.
*/
-TEST_F(SoundTriggerHidlTest, LoadGenericSoundModelFail) {
+TEST_P(SoundTriggerHidlTest, LoadGenericSoundModelFail) {
int ret = -ENODEV;
V2_0_ISoundTriggerHw::SoundModel model;
SoundModelHandle handle = 0;
@@ -329,7 +313,7 @@
* Verifies that:
* - the implementation returns error when passed a sound model with random data.
*/
-TEST_F(SoundTriggerHidlTest, LoadEmptyGenericSoundModelFail_2_1) {
+TEST_P(SoundTriggerHidlTest, LoadEmptyGenericSoundModelFail_2_1) {
int ret = -ENODEV;
ISoundTriggerHw::SoundModel model;
SoundModelHandle handle = 0;
@@ -353,7 +337,7 @@
* Verifies that:
* - the implementation returns error when passed a sound model with random data.
*/
-TEST_F(SoundTriggerHidlTest, LoadGenericSoundModelFail_2_1) {
+TEST_P(SoundTriggerHidlTest, LoadGenericSoundModelFail_2_1) {
int ret = -ENODEV;
ISoundTriggerHw::SoundModel model;
SoundModelHandle handle = 0;
@@ -394,7 +378,7 @@
* - the implementation returns an error when called without a valid loaded sound model
*
*/
-TEST_F(SoundTriggerHidlTest, UnloadModelNoModelFail) {
+TEST_P(SoundTriggerHidlTest, UnloadModelNoModelFail) {
Return<int32_t> hidlReturn(0);
SoundModelHandle halHandle = 0;
@@ -414,7 +398,7 @@
* There is no way to verify that implementation actually starts recognition because no model can
* be loaded.
*/
-TEST_F(SoundTriggerHidlTest, StartRecognitionNoModelFail) {
+TEST_P(SoundTriggerHidlTest, StartRecognitionNoModelFail) {
Return<int32_t> hidlReturn(0);
SoundModelHandle handle = 0;
PhraseRecognitionExtra phrase;
@@ -444,7 +428,7 @@
* There is no way to verify that implementation actually starts recognition because no model can
* be loaded.
*/
-TEST_F(SoundTriggerHidlTest, StartRecognitionNoModelFail_2_1) {
+TEST_P(SoundTriggerHidlTest, StartRecognitionNoModelFail_2_1) {
Return<int32_t> hidlReturn(0);
SoundModelHandle handle = 0;
PhraseRecognitionExtra phrase;
@@ -472,7 +456,7 @@
* - the implementation returns an error when called without an active recognition running
*
*/
-TEST_F(SoundTriggerHidlTest, StopRecognitionNoAStartFail) {
+TEST_P(SoundTriggerHidlTest, StopRecognitionNoAStartFail) {
Return<int32_t> hidlReturn(0);
SoundModelHandle handle = 0;
@@ -489,7 +473,7 @@
* - the implementation implements this optional method or indicates it is not supported by
* returning -ENOSYS
*/
-TEST_F(SoundTriggerHidlTest, stopAllRecognitions) {
+TEST_P(SoundTriggerHidlTest, stopAllRecognitions) {
Return<int32_t> hidlReturn(0);
hidlReturn = mSoundTriggerHal->stopAllRecognitions();
@@ -498,11 +482,7 @@
EXPECT_TRUE(hidlReturn == 0 || hidlReturn == -ENOSYS);
}
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(SoundTriggerHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- SoundTriggerHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, SoundTriggerHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(ISoundTriggerHw::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/soundtrigger/2.2/vts/functional/Android.bp b/soundtrigger/2.2/vts/functional/Android.bp
index 08ccd7b..b5d241d 100644
--- a/soundtrigger/2.2/vts/functional/Android.bp
+++ b/soundtrigger/2.2/vts/functional/Android.bp
@@ -23,5 +23,5 @@
"android.hardware.soundtrigger@2.1",
"android.hardware.soundtrigger@2.2",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp b/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp
index 0f37816..1cce5a1 100644
--- a/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp
+++ b/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp
@@ -23,42 +23,26 @@
#include <android/log.h>
#include <cutils/native_handle.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <log/log.h>
#include <android/hardware/audio/common/2.0/types.h>
#include <android/hardware/soundtrigger/2.0/ISoundTriggerHw.h>
#include <android/hardware/soundtrigger/2.2/ISoundTriggerHw.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::soundtrigger::V2_0::ISoundTriggerHwCallback;
using ::android::hardware::soundtrigger::V2_0::SoundModelHandle;
using ::android::hardware::soundtrigger::V2_2::ISoundTriggerHw;
-// Test environment for SoundTrigger HIDL HAL.
-class SoundTriggerHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static SoundTriggerHidlEnvironment* Instance() {
- static SoundTriggerHidlEnvironment* instance = new SoundTriggerHidlEnvironment;
- return instance;
- }
-
- void registerTestServices() override { registerTestService<ISoundTriggerHw>(); }
-
- private:
- SoundTriggerHidlEnvironment() {}
-};
-
// The main test class for Sound Trigger HIDL HAL.
-class SoundTriggerHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SoundTriggerHidlTest : public ::testing::TestWithParam<std::string> {
public:
void SetUp() override {
- mSoundTriggerHal = ::testing::VtsHalHidlTargetTestBase::getService<ISoundTriggerHw>(
- SoundTriggerHidlEnvironment::Instance()->getServiceName<ISoundTriggerHw>());
+ mSoundTriggerHal = ISoundTriggerHw::getService(GetParam());
ASSERT_NE(nullptr, mSoundTriggerHal.get());
}
@@ -77,18 +61,13 @@
* - the implementation returns -ENOSYS with invalid model handle
*
*/
-TEST_F(SoundTriggerHidlTest, GetModelStateInvalidModel) {
+TEST_P(SoundTriggerHidlTest, GetModelStateInvalidModel) {
SoundModelHandle handle = 0;
Return<int32_t> hidlReturn = mSoundTriggerHal->getModelState(handle);
EXPECT_TRUE(hidlReturn.isOk());
EXPECT_EQ(-ENOSYS, hidlReturn);
}
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(SoundTriggerHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- SoundTriggerHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, SoundTriggerHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(ISoundTriggerHw::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/vibrator/1.x/example/Android.bp b/vibrator/1.3/example/Android.bp
similarity index 81%
rename from vibrator/1.x/example/Android.bp
rename to vibrator/1.3/example/Android.bp
index afbbb75..07f1c26 100644
--- a/vibrator/1.x/example/Android.bp
+++ b/vibrator/1.3/example/Android.bp
@@ -14,11 +14,11 @@
// limitations under the License.
cc_binary {
- name: "android.hardware.vibrator@1.x-service.example",
+ name: "android.hardware.vibrator@1.3-service.example",
vendor: true,
relative_install_path: "hw",
- init_rc: ["android.hardware.vibrator@1.x-service.example.rc"],
- vintf_fragments: ["android.hardware.vibrator@1.x-service.example.xml"],
+ init_rc: ["android.hardware.vibrator@1.3-service.example.rc"],
+ vintf_fragments: ["android.hardware.vibrator@1.3-service.example.xml"],
srcs: ["service.cpp", "Vibrator.cpp"],
cflags: ["-Wall", "-Werror"],
shared_libs: [
@@ -29,6 +29,5 @@
"android.hardware.vibrator@1.1",
"android.hardware.vibrator@1.2",
"android.hardware.vibrator@1.3",
- "android.hardware.vibrator@1.4",
],
}
diff --git a/vibrator/1.x/example/OWNERS b/vibrator/1.3/example/OWNERS
similarity index 100%
rename from vibrator/1.x/example/OWNERS
rename to vibrator/1.3/example/OWNERS
diff --git a/vibrator/1.x/example/Vibrator.cpp b/vibrator/1.3/example/Vibrator.cpp
similarity index 86%
rename from vibrator/1.x/example/Vibrator.cpp
rename to vibrator/1.3/example/Vibrator.cpp
index 4dd1cb9..b529437 100644
--- a/vibrator/1.x/example/Vibrator.cpp
+++ b/vibrator/1.3/example/Vibrator.cpp
@@ -23,7 +23,7 @@
namespace android {
namespace hardware {
namespace vibrator {
-namespace V1_4 {
+namespace V1_3 {
namespace implementation {
static constexpr uint32_t MS_PER_S = 1000;
@@ -100,25 +100,7 @@
}
}
-Return<void> Vibrator::perform_1_3(V1_3::Effect effect, EffectStrength strength,
- perform_cb _hidl_cb) {
- return perform<decltype(effect)>(effect, strength, _hidl_cb);
-}
-
-// Methods from ::android::hardware::vibrator::V1_4::IVibrator follow.
-
-Return<hidl_bitfield<Capabilities>> Vibrator::getCapabilities() {
- return Capabilities::ON_COMPLETION_CALLBACK | Capabilities::PERFORM_COMPLETION_CALLBACK;
-}
-
-Return<Status> Vibrator::on_1_4(uint32_t timeoutMs, const sp<IVibratorCallback>& callback) {
- mCallback = callback;
- return on(timeoutMs);
-}
-
-Return<void> Vibrator::perform_1_4(V1_3::Effect effect, EffectStrength strength,
- const sp<IVibratorCallback>& callback, perform_cb _hidl_cb) {
- mCallback = callback;
+Return<void> Vibrator::perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
return perform<decltype(effect)>(effect, strength, _hidl_cb);
}
@@ -166,14 +148,6 @@
return Status::UNSUPPORTED_OPERATION;
} else {
ALOGI("Enabled: %s -> %s\n", mEnabled ? "true" : "false", enabled ? "true" : "false");
- if (mEnabled && !enabled) {
- if (auto callback = mCallback) {
- mCallback = nullptr;
- if (auto ret = callback->onComplete(); !ret.isOk()) {
- ALOGE("Failed completion callback: %s", ret.description().c_str());
- }
- }
- }
mEnabled = enabled;
return Status::OK;
}
@@ -297,7 +271,7 @@
}
} // namespace implementation
-} // namespace V1_4
+} // namespace V1_3
} // namespace vibrator
} // namespace hardware
} // namespace android
diff --git a/vibrator/1.x/example/Vibrator.h b/vibrator/1.3/example/Vibrator.h
similarity index 75%
rename from vibrator/1.x/example/Vibrator.h
rename to vibrator/1.3/example/Vibrator.h
index ff63431..5180774 100644
--- a/vibrator/1.x/example/Vibrator.h
+++ b/vibrator/1.3/example/Vibrator.h
@@ -13,21 +13,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_VIBRATOR_V1_x_VIBRATOR_H
-#define ANDROID_HARDWARE_VIBRATOR_V1_x_VIBRATOR_H
+#ifndef ANDROID_HARDWARE_VIBRATOR_V1_3_VIBRATOR_H
+#define ANDROID_HARDWARE_VIBRATOR_V1_3_VIBRATOR_H
-#include <android/hardware/vibrator/1.4/IVibrator.h>
+#include <android/hardware/vibrator/1.3/IVibrator.h>
#include <hidl/Status.h>
namespace android {
namespace hardware {
namespace vibrator {
-namespace V1_4 {
+namespace V1_3 {
namespace implementation {
using android::hardware::vibrator::V1_0::EffectStrength;
using android::hardware::vibrator::V1_0::Status;
-using android::hardware::vibrator::V1_3::Effect;
class Vibrator : public IVibrator {
public:
@@ -52,14 +51,7 @@
// Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
Return<bool> supportsExternalControl() override;
Return<Status> setExternalControl(bool enabled) override;
- Return<void> perform_1_3(V1_3::Effect effect, EffectStrength strength,
- perform_cb _hidl_cb) override;
-
- // Methods from ::android::hardware::vibrator::V1_4::IVibrator follow.
- Return<hidl_bitfield<Capabilities>> getCapabilities() override;
- Return<Status> on_1_4(uint32_t timeoutMs, const sp<IVibratorCallback>& callback) override;
- Return<void> perform_1_4(V1_3::Effect effect, EffectStrength strength,
- const sp<IVibratorCallback>& callback, perform_cb _hidl_cb) override;
+ Return<void> perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
private:
Return<void> perform(Effect effect, EffectStrength strength, perform_cb _hidl_cb);
@@ -80,12 +72,11 @@
bool mExternalControl{false};
std::mutex mMutex;
timer_t mTimer{nullptr};
- sp<IVibratorCallback> mCallback{nullptr};
};
} // namespace implementation
-} // namespace V1_4
+} // namespace V1_3
} // namespace vibrator
} // namespace hardware
} // namespace android
-#endif // ANDROID_HARDWARE_VIBRATOR_V1_x_VIBRATOR_H
+#endif // ANDROID_HARDWARE_VIBRATOR_V1_3_VIBRATOR_H
diff --git a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.rc b/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.rc
new file mode 100644
index 0000000..ed7a562
--- /dev/null
+++ b/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.rc
@@ -0,0 +1,4 @@
+service vendor.vibrator-1-3 /vendor/bin/hw/android.hardware.vibrator@1.3-service.example
+ class hal
+ user system
+ group system
diff --git a/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.xml b/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
similarity index 89%
rename from vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.xml
rename to vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
index ebc8c4b..172aa21 100644
--- a/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.xml
+++ b/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
@@ -2,7 +2,7 @@
<hal format="hidl">
<name>android.hardware.vibrator</name>
<transport>hwbinder</transport>
- <version>1.4</version>
+ <version>1.3</version>
<interface>
<name>IVibrator</name>
<instance>default</instance>
diff --git a/vibrator/1.x/example/service.cpp b/vibrator/1.3/example/service.cpp
similarity index 82%
rename from vibrator/1.x/example/service.cpp
rename to vibrator/1.3/example/service.cpp
index 13c6691..449996e 100644
--- a/vibrator/1.x/example/service.cpp
+++ b/vibrator/1.3/example/service.cpp
@@ -13,17 +13,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#define LOG_TAG "android.hardware.vibrator@1.x-service.example"
+#define LOG_TAG "android.hardware.vibrator@1.3-service.example"
-#include <android/hardware/vibrator/1.4/IVibrator.h>
+#include <android/hardware/vibrator/1.3/IVibrator.h>
#include <hidl/HidlTransportSupport.h>
#include "Vibrator.h"
using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;
-using android::hardware::vibrator::V1_4::IVibrator;
-using android::hardware::vibrator::V1_4::implementation::Vibrator;
+using android::hardware::vibrator::V1_3::IVibrator;
+using android::hardware::vibrator::V1_3::implementation::Vibrator;
using namespace android;
status_t registerVibratorService() {
diff --git a/vibrator/1.4/Android.bp b/vibrator/1.4/Android.bp
index cf31fcd..acfc795 100644
--- a/vibrator/1.4/Android.bp
+++ b/vibrator/1.4/Android.bp
@@ -3,9 +3,6 @@
hidl_interface {
name: "android.hardware.vibrator@1.4",
root: "android.hardware",
- vndk: {
- enabled: true,
- },
srcs: [
"types.hal",
"IVibrator.hal",
diff --git a/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.rc b/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.rc
deleted file mode 100644
index 4893db6..0000000
--- a/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.rc
+++ /dev/null
@@ -1,4 +0,0 @@
-service vendor.vibrator-1-x /vendor/bin/hw/android.hardware.vibrator@1.x-service.example
- class hal
- user system
- group system
diff --git a/wifi/1.0/vts/functional/Android.bp b/wifi/1.0/vts/functional/Android.bp
index 6fa6e7e..bf77503 100644
--- a/wifi/1.0/vts/functional/Android.bp
+++ b/wifi/1.0/vts/functional/Android.bp
@@ -30,6 +30,7 @@
],
static_libs: [
"android.hardware.wifi@1.0",
+ "libwifi-system-iface"
],
}
@@ -49,8 +50,9 @@
"android.hardware.wifi@1.1",
"android.hardware.wifi@1.2",
"android.hardware.wifi@1.3",
+ "libwifi-system-iface"
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
// These tests are split out so that they can be conditioned on presence of the
@@ -66,8 +68,9 @@
static_libs: [
"VtsHalWifiV1_0TargetTestUtil",
"android.hardware.wifi@1.0",
+ "libwifi-system-iface"
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
// These tests are split out so that they can be conditioned on presence of
@@ -83,6 +86,7 @@
static_libs: [
"VtsHalWifiV1_0TargetTestUtil",
"android.hardware.wifi@1.0",
+ "libwifi-system-iface"
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp b/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp
index 9d25014..128dae5 100644
--- a/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp
+++ b/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp
@@ -14,34 +14,8 @@
* limitations under the License.
*/
-#include <android-base/logging.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
-#include "wifi_hidl_test_utils.h"
-
-class WifiVtsHidlEnvironment_1_0 : public WifiHidlEnvironment {
- public:
- // get the test environment singleton
- static WifiVtsHidlEnvironment_1_0* Instance() {
- static WifiVtsHidlEnvironment_1_0* instance =
- new WifiVtsHidlEnvironment_1_0;
- return instance;
- }
-
- virtual void registerTestServices() override {
- registerTestService<android::hardware::wifi::V1_0::IWifi>();
- }
-
- private:
- WifiVtsHidlEnvironment_1_0() {}
-};
-
-WifiHidlEnvironment* gEnv = WifiVtsHidlEnvironment_1_0::Instance();
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(gEnv);
- ::testing::InitGoogleTest(&argc, argv);
- gEnv->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+// TODO(b/143892896): Remove this file after wifi_hidl_test_utils.cpp is
+// updated.
+::testing::VtsHalHidlTargetTestEnvBase* gEnv = nullptr;
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
index c55221d..8be8a0c 100644
--- a/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
@@ -16,35 +16,37 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiApIface.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
+using ::android::sp;
using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifi;
using ::android::hardware::wifi::V1_0::IWifiApIface;
using ::android::hardware::wifi::V1_0::WifiBand;
using ::android::hardware::wifi::V1_0::WifiStatusCode;
-using ::android::sp;
/**
* Fixture to use for all AP Iface HIDL interface tests.
*/
-class WifiApIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiApIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_ap_iface_ = getWifiApIface();
+ wifi_ap_iface_ = getWifiApIface(GetInstanceName());
ASSERT_NE(nullptr, wifi_ap_iface_.get());
}
- virtual void TearDown() override {
- stopWifi();
- }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
sp<IWifiApIface> wifi_ap_iface_;
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -52,16 +54,15 @@
* Ensures that an instance of the IWifiApIface proxy object is
* successfully created.
*/
-TEST(WifiApIfaceHidlTestNoFixture, Create) {
- EXPECT_NE(nullptr, getWifiApIface().get());
- stopWifi();
+TEST_P(WifiApIfaceHidlTest, Create) {
+ // The creation of a proxy object is tested as part of the SetUp method.
}
/*
* GetType:
* Ensures that the correct interface type is returned for AP interface.
*/
-TEST_F(WifiApIfaceHidlTest, GetType) {
+TEST_P(WifiApIfaceHidlTest, GetType) {
const auto& status_and_type = HIDL_INVOKE(wifi_ap_iface_, getType);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_type.first.code);
EXPECT_EQ(IfaceType::AP, status_and_type.second);
@@ -72,7 +73,7 @@
* Ensures that a call to set the country code will return with a success
* status code.
*/
-TEST_F(WifiApIfaceHidlTest, SetCountryCode) {
+TEST_P(WifiApIfaceHidlTest, SetCountryCode) {
const android::hardware::hidl_array<int8_t, 2> kCountryCode{
std::array<int8_t, 2>{{0x55, 0x53}}};
EXPECT_EQ(WifiStatusCode::SUCCESS,
@@ -83,9 +84,15 @@
* GetValidFrequenciesForBand:
* Ensures that we can retrieve valid frequencies for 2.4 GHz band.
*/
-TEST_F(WifiApIfaceHidlTest, GetValidFrequenciesForBand) {
+TEST_P(WifiApIfaceHidlTest, GetValidFrequenciesForBand) {
const auto& status_and_freqs = HIDL_INVOKE(
wifi_ap_iface_, getValidFrequenciesForBand, WifiBand::BAND_24GHZ);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_freqs.first.code);
EXPECT_GT(status_and_freqs.second.size(), 0u);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiApIfaceHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
index 232ffdd..33817d5 100644
--- a/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
@@ -16,9 +16,11 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiChip.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -26,6 +28,7 @@
using ::android::sp;
using ::android::hardware::wifi::V1_0::ChipModeId;
using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifi;
using ::android::hardware::wifi::V1_0::IWifiApIface;
using ::android::hardware::wifi::V1_0::IWifiChip;
using ::android::hardware::wifi::V1_0::IWifiIface;
@@ -35,14 +38,14 @@
/**
* Fixture for IWifiChip tests that are conditioned on SoftAP support.
*/
-class WifiChipHidlApTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiChipHidlApTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_chip_ = getWifiChip();
+ wifi_chip_ = getWifiChip(GetInstanceName());
ASSERT_NE(nullptr, wifi_chip_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
// Helper function to configure the Chip in one of the supported modes.
@@ -72,6 +75,9 @@
}
sp<IWifiChip> wifi_chip_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -79,7 +85,7 @@
* Configures the chip in AP mode and ensures that at least 1 iface creation
* succeeds.
*/
-TEST_F(WifiChipHidlApTest, CreateApIface) {
+TEST_P(WifiChipHidlApTest, CreateApIface) {
configureChipForIfaceType(IfaceType::AP, true);
sp<IWifiApIface> iface;
@@ -93,7 +99,7 @@
* before creating the iface. Then, create the iface and ensure that
* iface name is returned via the list.
*/
-TEST_F(WifiChipHidlApTest, GetApIfaceNames) {
+TEST_P(WifiChipHidlApTest, GetApIfaceNames) {
configureChipForIfaceType(IfaceType::AP, true);
const auto& status_and_iface_names1 =
@@ -125,7 +131,7 @@
* the iface object using the correct name and ensure any other name
* doesn't retrieve an iface object.
*/
-TEST_F(WifiChipHidlApTest, GetApIface) {
+TEST_P(WifiChipHidlApTest, GetApIface) {
configureChipForIfaceType(IfaceType::AP, true);
sp<IWifiApIface> ap_iface;
@@ -151,7 +157,7 @@
* the iface object using the correct name and ensure any other name
* doesn't remove the iface.
*/
-TEST_F(WifiChipHidlApTest, RemoveApIface) {
+TEST_P(WifiChipHidlApTest, RemoveApIface) {
configureChipForIfaceType(IfaceType::AP, true);
sp<IWifiApIface> ap_iface;
@@ -166,3 +172,9 @@
// No such iface exists now. So, this should return failure.
EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeApIface(iface_name));
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiChipHidlApTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp
index 595f23a..95f223d 100644
--- a/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp
@@ -16,9 +16,11 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiChip.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -26,6 +28,7 @@
using ::android::sp;
using ::android::hardware::wifi::V1_0::ChipModeId;
using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifi;
using ::android::hardware::wifi::V1_0::IWifiChip;
using ::android::hardware::wifi::V1_0::IWifiIface;
using ::android::hardware::wifi::V1_0::IWifiNanIface;
@@ -35,14 +38,14 @@
/**
* Fixture for IWifiChip tests that are conditioned on NAN support.
*/
-class WifiChipHidlNanTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiChipHidlNanTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_chip_ = getWifiChip();
+ wifi_chip_ = getWifiChip(GetInstanceName());
ASSERT_NE(nullptr, wifi_chip_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
// Helper function to configure the Chip in one of the supported modes.
@@ -72,6 +75,9 @@
}
sp<IWifiChip> wifi_chip_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -79,7 +85,7 @@
* Configures the chip in NAN mode and ensures that at least 1 iface creation
* succeeds.
*/
-TEST_F(WifiChipHidlNanTest, CreateNanIface) {
+TEST_P(WifiChipHidlNanTest, CreateNanIface) {
configureChipForIfaceType(IfaceType::NAN, true);
sp<IWifiNanIface> iface;
@@ -93,7 +99,7 @@
* before creating the iface. Then, create the iface and ensure that
* iface name is returned via the list.
*/
-TEST_F(WifiChipHidlNanTest, GetNanIfaceNames) {
+TEST_P(WifiChipHidlNanTest, GetNanIfaceNames) {
configureChipForIfaceType(IfaceType::NAN, true);
const auto& status_and_iface_names1 =
@@ -125,7 +131,7 @@
* the iface object using the correct name and ensure any other name
* doesn't retrieve an iface object.
*/
-TEST_F(WifiChipHidlNanTest, GetNanIface) {
+TEST_P(WifiChipHidlNanTest, GetNanIface) {
configureChipForIfaceType(IfaceType::NAN, true);
sp<IWifiNanIface> nan_iface;
@@ -151,7 +157,7 @@
* the iface object using the correct name and ensure any other name
* doesn't remove the iface.
*/
-TEST_F(WifiChipHidlNanTest, RemoveNanIface) {
+TEST_P(WifiChipHidlNanTest, RemoveNanIface) {
configureChipForIfaceType(IfaceType::NAN, true);
sp<IWifiNanIface> nan_iface;
@@ -167,3 +173,9 @@
// No such iface exists now. So, this should return failure.
EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeNanIface(iface_name));
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiChipHidlNanTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp
index 2601b78..ec96fcf 100644
--- a/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp
@@ -16,10 +16,12 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiChip.h>
#include <android/hardware/wifi/1.3/IWifiChip.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -27,19 +29,20 @@
using ::android::sp;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
-using ::android::hardware::wifi::V1_0::IfaceType;
using ::android::hardware::wifi::V1_0::ChipId;
using ::android::hardware::wifi::V1_0::ChipModeId;
-using ::android::hardware::wifi::V1_0::WifiDebugRingBufferStatus;
-using ::android::hardware::wifi::V1_0::WifiDebugRingBufferVerboseLevel;
-using ::android::hardware::wifi::V1_0::WifiDebugHostWakeReasonStats;
-using ::android::hardware::wifi::V1_0::WifiStatus;
-using ::android::hardware::wifi::V1_0::WifiStatusCode;
+using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifi;
using ::android::hardware::wifi::V1_0::IWifiChip;
using ::android::hardware::wifi::V1_0::IWifiIface;
using ::android::hardware::wifi::V1_0::IWifiP2pIface;
using ::android::hardware::wifi::V1_0::IWifiRttController;
using ::android::hardware::wifi::V1_0::IWifiStaIface;
+using ::android::hardware::wifi::V1_0::WifiDebugHostWakeReasonStats;
+using ::android::hardware::wifi::V1_0::WifiDebugRingBufferStatus;
+using ::android::hardware::wifi::V1_0::WifiDebugRingBufferVerboseLevel;
+using ::android::hardware::wifi::V1_0::WifiStatus;
+using ::android::hardware::wifi::V1_0::WifiStatusCode;
extern WifiHidlEnvironment* gEnv;
@@ -67,14 +70,14 @@
* Tests that require SoftAP or NAN support should go into WifiChipHidlApTest or
* WifiChipHidlNanTest respectively.
*/
-class WifiChipHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiChipHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_chip_ = getWifiChip();
+ wifi_chip_ = getWifiChip(GetInstanceName());
ASSERT_NE(nullptr, wifi_chip_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
// Helper function to configure the Chip in one of the supported modes.
@@ -136,6 +139,9 @@
}
sp<IWifiChip> wifi_chip_;
+
+ protected:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -143,15 +149,14 @@
* Ensures that an instance of the IWifiChip proxy object is
* successfully created.
*/
-TEST(WifiChipHidlTestNoFixture, Create) {
- EXPECT_NE(nullptr, getWifiChip().get());
- stopWifi();
+TEST_P(WifiChipHidlTest, Create) {
+ // The creation of a proxy object is tested as part of the SetUp method.
}
/*
* GetId:
*/
-TEST_F(WifiChipHidlTest, GetId) {
+TEST_P(WifiChipHidlTest, GetId) {
EXPECT_EQ(WifiStatusCode::SUCCESS,
HIDL_INVOKE(wifi_chip_, getId).first.code);
}
@@ -159,7 +164,7 @@
/*
* GetAvailableMode:
*/
-TEST_F(WifiChipHidlTest, GetAvailableModes) {
+TEST_P(WifiChipHidlTest, GetAvailableModes) {
const auto& status_and_modes = HIDL_INVOKE(wifi_chip_, getAvailableModes);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_modes.first.code);
EXPECT_LT(0u, status_and_modes.second.size());
@@ -168,17 +173,17 @@
/*
* ConfigureChip:
*/
-TEST_F(WifiChipHidlTest, ConfigureChip) {
+TEST_P(WifiChipHidlTest, ConfigureChip) {
const auto& status_and_modes = HIDL_INVOKE(wifi_chip_, getAvailableModes);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_modes.first.code);
EXPECT_LT(0u, status_and_modes.second.size());
for (const auto& mode : status_and_modes.second) {
// configureChip() requires to be called with a fresh IWifiChip object.
- wifi_chip_ = getWifiChip();
+ wifi_chip_ = getWifiChip(GetInstanceName());
ASSERT_NE(nullptr, wifi_chip_.get());
EXPECT_EQ(WifiStatusCode::SUCCESS,
HIDL_INVOKE(wifi_chip_, configureChip, mode.id).code);
- stopWifi();
+ stopWifi(GetInstanceName());
// Sleep for 5 milliseconds between each wifi state toggle.
usleep(5000);
}
@@ -187,7 +192,7 @@
/*
* GetCapabilities:
*/
-TEST_F(WifiChipHidlTest, GetCapabilities) {
+TEST_P(WifiChipHidlTest, GetCapabilities) {
configureChipForIfaceType(IfaceType::STA, true);
const auto& status_and_caps = HIDL_INVOKE(wifi_chip_, getCapabilities);
if (status_and_caps.first.code != WifiStatusCode::SUCCESS) {
@@ -200,7 +205,7 @@
/*
* GetMode:
*/
-TEST_F(WifiChipHidlTest, GetMode) {
+TEST_P(WifiChipHidlTest, GetMode) {
ChipModeId chip_mode_id = configureChipForIfaceType(IfaceType::STA, true);
const auto& status_and_mode = HIDL_INVOKE(wifi_chip_, getMode);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_mode.first.code);
@@ -210,7 +215,7 @@
/*
* RequestChipDebugInfo:
*/
-TEST_F(WifiChipHidlTest, RequestChipDebugInfo) {
+TEST_P(WifiChipHidlTest, RequestChipDebugInfo) {
configureChipForIfaceType(IfaceType::STA, true);
const auto& status_and_chip_info =
HIDL_INVOKE(wifi_chip_, requestChipDebugInfo);
@@ -222,7 +227,7 @@
/*
* RequestFirmwareDebugDump
*/
-TEST_F(WifiChipHidlTest, RequestFirmwareDebugDump) {
+TEST_P(WifiChipHidlTest, RequestFirmwareDebugDump) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status_and_firmware_dump =
HIDL_INVOKE(wifi_chip_, requestFirmwareDebugDump);
@@ -237,7 +242,7 @@
/*
* RequestDriverDebugDump
*/
-TEST_F(WifiChipHidlTest, RequestDriverDebugDump) {
+TEST_P(WifiChipHidlTest, RequestDriverDebugDump) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status_and_driver_dump =
HIDL_INVOKE(wifi_chip_, requestDriverDebugDump);
@@ -254,7 +259,7 @@
/*
* GetDebugRingBuffersStatus
*/
-TEST_F(WifiChipHidlTest, GetDebugRingBuffersStatus) {
+TEST_P(WifiChipHidlTest, GetDebugRingBuffersStatus) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status_and_ring_buffer_status =
HIDL_INVOKE(wifi_chip_, getDebugRingBuffersStatus);
@@ -273,7 +278,7 @@
/*
* StartLoggingToDebugRingBuffer
*/
-TEST_F(WifiChipHidlTest, StartLoggingToDebugRingBuffer) {
+TEST_P(WifiChipHidlTest, StartLoggingToDebugRingBuffer) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
std::string ring_name;
const auto& status_and_ring_buffer_status =
@@ -301,7 +306,7 @@
/*
* ForceDumpToDebugRingBuffer
*/
-TEST_F(WifiChipHidlTest, ForceDumpToDebugRingBuffer) {
+TEST_P(WifiChipHidlTest, ForceDumpToDebugRingBuffer) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
std::string ring_name;
const auto& status_and_ring_buffer_status =
@@ -327,7 +332,7 @@
/*
* GetDebugHostWakeReasonStats
*/
-TEST_F(WifiChipHidlTest, GetDebugHostWakeReasonStats) {
+TEST_P(WifiChipHidlTest, GetDebugHostWakeReasonStats) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status_and_debug_wake_reason =
HIDL_INVOKE(wifi_chip_, getDebugHostWakeReasonStats);
@@ -345,7 +350,7 @@
* Configures the chip in P2P mode and ensures that at least 1 iface creation
* succeeds.
*/
-TEST_F(WifiChipHidlTest, CreateP2pIface) {
+TEST_P(WifiChipHidlTest, CreateP2pIface) {
configureChipForIfaceType(IfaceType::P2P, true);
sp<IWifiP2pIface> iface;
@@ -359,7 +364,7 @@
* before creating the iface. Then, create the iface and ensure that
* iface name is returned via the list.
*/
-TEST_F(WifiChipHidlTest, GetP2pIfaceNames) {
+TEST_P(WifiChipHidlTest, GetP2pIfaceNames) {
configureChipForIfaceType(IfaceType::P2P, true);
const auto& status_and_iface_names1 =
@@ -391,7 +396,7 @@
* the iface object using the correct name and ensure any other name
* doesn't retrieve an iface object.
*/
-TEST_F(WifiChipHidlTest, GetP2pIface) {
+TEST_P(WifiChipHidlTest, GetP2pIface) {
configureChipForIfaceType(IfaceType::P2P, true);
sp<IWifiP2pIface> p2p_iface;
@@ -417,7 +422,7 @@
* the iface object using the correct name and ensure any other name
* doesn't remove the iface.
*/
-TEST_F(WifiChipHidlTest, RemoveP2pIface) {
+TEST_P(WifiChipHidlTest, RemoveP2pIface) {
configureChipForIfaceType(IfaceType::P2P, true);
sp<IWifiP2pIface> p2p_iface;
@@ -438,7 +443,7 @@
* Configures the chip in STA mode and ensures that at least 1 iface creation
* succeeds.
*/
-TEST_F(WifiChipHidlTest, CreateStaIface) {
+TEST_P(WifiChipHidlTest, CreateStaIface) {
configureChipForIfaceType(IfaceType::STA, true);
sp<IWifiStaIface> iface;
@@ -452,7 +457,7 @@
* before creating the iface. Then, create the iface and ensure that
* iface name is returned via the list.
*/
-TEST_F(WifiChipHidlTest, GetStaIfaceNames) {
+TEST_P(WifiChipHidlTest, GetStaIfaceNames) {
configureChipForIfaceType(IfaceType::STA, true);
const auto& status_and_iface_names1 =
@@ -484,7 +489,7 @@
* the iface object using the correct name and ensure any other name
* doesn't retrieve an iface object.
*/
-TEST_F(WifiChipHidlTest, GetStaIface) {
+TEST_P(WifiChipHidlTest, GetStaIface) {
configureChipForIfaceType(IfaceType::STA, true);
sp<IWifiStaIface> sta_iface;
@@ -510,7 +515,7 @@
* the iface object using the correct name and ensure any other name
* doesn't remove the iface.
*/
-TEST_F(WifiChipHidlTest, RemoveStaIface) {
+TEST_P(WifiChipHidlTest, RemoveStaIface) {
configureChipForIfaceType(IfaceType::STA, true);
sp<IWifiStaIface> sta_iface;
@@ -529,7 +534,7 @@
/*
* CreateRttController
*/
-TEST_F(WifiChipHidlTest, CreateRttController) {
+TEST_P(WifiChipHidlTest, CreateRttController) {
configureChipForIfaceType(IfaceType::STA, true);
sp<IWifiStaIface> iface;
@@ -541,3 +546,9 @@
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_rtt_controller.first.code);
EXPECT_NE(nullptr, status_and_rtt_controller.second.get());
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiChipHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_hidl_test.cpp
index b8e501c..512701a 100644
--- a/wifi/1.0/vts/functional/wifi_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_hidl_test.cpp
@@ -18,7 +18,9 @@
#include <android/hardware/wifi/1.0/IWifi.h>
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_test_utils.h"
@@ -28,13 +30,14 @@
/**
* Fixture to use for all root Wifi HIDL interface tests.
*/
-class WifiHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -42,7 +45,12 @@
* Ensures that an instance of the IWifi proxy object is
* successfully created.
*/
-TEST(WifiHidlTestNoFixture, Create) {
- EXPECT_NE(nullptr, getWifi().get());
- stopWifi();
+TEST_P(WifiHidlTest, Create) {
+ // The creation of a proxy object is tested as part of the SetUp method.
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_hidl_test_utils.cpp b/wifi/1.0/vts/functional/wifi_hidl_test_utils.cpp
index f89f7b4..d584d4b 100644
--- a/wifi/1.0/vts/functional/wifi_hidl_test_utils.cpp
+++ b/wifi/1.0/vts/functional/wifi_hidl_test_utils.cpp
@@ -18,12 +18,15 @@
#include <VtsHalHidlTargetTestBase.h>
+#include <wifi_system/interface_tool.h>
+
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
using ::android::hardware::wifi::V1_0::IWifi;
using ::android::hardware::wifi::V1_0::IWifiApIface;
using ::android::hardware::wifi::V1_0::IWifiChip;
+using ::android::hardware::wifi::V1_0::IWifiIface;
using ::android::hardware::wifi::V1_0::IWifiNanIface;
using ::android::hardware::wifi::V1_0::IWifiP2pIface;
using ::android::hardware::wifi::V1_0::IWifiRttController;
@@ -36,6 +39,7 @@
using ::android::sp;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
+using ::android::wifi_system::InterfaceTool;
extern WifiHidlEnvironment* gEnv;
@@ -131,6 +135,16 @@
return status_and_chip.second;
}
+void setIfaceUp(const sp<IWifiIface>& iface) {
+ // Set the iface up before returning the object.
+ const auto& status_and_name = HIDL_INVOKE(iface, getName);
+ if (status_and_name.first.code == WifiStatusCode::SUCCESS) {
+ const auto& iface_name = status_and_name.second;
+ InterfaceTool iface_tool;
+ iface_tool.SetUpState(iface_name.c_str(), true);
+ }
+}
+
sp<IWifiApIface> getWifiApIface(const std::string& instance_name) {
sp<IWifiChip> wifi_chip = getWifiChip(instance_name);
if (!wifi_chip.get()) {
@@ -143,6 +157,7 @@
if (status_and_iface.first.code != WifiStatusCode::SUCCESS) {
return nullptr;
}
+ setIfaceUp(status_and_iface.second);
return status_and_iface.second;
}
@@ -158,6 +173,7 @@
if (status_and_iface.first.code != WifiStatusCode::SUCCESS) {
return nullptr;
}
+ setIfaceUp(status_and_iface.second);
return status_and_iface.second;
}
@@ -173,6 +189,7 @@
if (status_and_iface.first.code != WifiStatusCode::SUCCESS) {
return nullptr;
}
+ setIfaceUp(status_and_iface.second);
return status_and_iface.second;
}
@@ -188,6 +205,7 @@
if (status_and_iface.first.code != WifiStatusCode::SUCCESS) {
return nullptr;
}
+ setIfaceUp(status_and_iface.second);
return status_and_iface.second;
}
@@ -196,7 +214,7 @@
if (!wifi_chip.get()) {
return nullptr;
}
- sp<IWifiStaIface> wifi_sta_iface = getWifiStaIface();
+ sp<IWifiStaIface> wifi_sta_iface = getWifiStaIface(instance_name);
if (!wifi_sta_iface.get()) {
return nullptr;
}
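The new setIfaceUp() helper is why every wifi VTS Android.bp above gains the libwifi-system-iface static library: the tests now bring the kernel interface up themselves before exercising it. A brief usage sketch, assuming only the SetUpState(const char*, bool) signature visible in the call above:

    // Hedged usage sketch for libwifi-system-iface.
    #include <wifi_system/interface_tool.h>

    bool bringIfaceUp(const char* iface_name) {
        android::wifi_system::InterfaceTool iface_tool;
        // SetUpState issues the up/down request; false signals failure.
        return iface_tool.SetUpState(iface_name, true);
    }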
diff --git a/wifi/1.0/vts/functional/wifi_nan_iface_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_nan_iface_hidl_test.cpp
index 64b4fb6..422e3f6 100644
--- a/wifi/1.0/vts/functional/wifi_nan_iface_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_nan_iface_hidl_test.cpp
@@ -16,10 +16,12 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiNanIface.h>
#include <android/hardware/wifi/1.0/IWifiNanIfaceEventCallback.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -29,27 +31,28 @@
using namespace ::android::hardware::wifi::V1_0;
+using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ::android::sp;
+using ::android::hardware::wifi::V1_0::IWifi;
#define TIMEOUT_PERIOD 10
/**
* Fixture to use for all NAN Iface HIDL interface tests.
*/
-class WifiNanIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
- public:
+class WifiNanIfaceHidlTest : public ::testing::TestWithParam<std::string> {
+ public:
virtual void SetUp() override {
- iwifiNanIface = getWifiNanIface();
- ASSERT_NE(nullptr, iwifiNanIface.get());
- ASSERT_EQ(WifiStatusCode::SUCCESS, HIDL_INVOKE(iwifiNanIface, registerEventCallback,
- new WifiNanIfaceEventCallback(*this)).code);
+ iwifiNanIface = getWifiNanIface(GetInstanceName());
+ ASSERT_NE(nullptr, iwifiNanIface.get());
+ ASSERT_EQ(WifiStatusCode::SUCCESS,
+ HIDL_INVOKE(iwifiNanIface, registerEventCallback,
+ new WifiNanIfaceEventCallback(*this))
+ .code);
}
- virtual void TearDown() override {
- stopWifi();
- }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
/* Used as a mechanism to inform the test about data/event callback */
inline void notify() {
@@ -438,6 +441,8 @@
NanFollowupReceivedInd nanFollowupReceivedInd;
NanDataPathRequestInd nanDataPathRequestInd;
NanDataPathConfirmInd nanDataPathConfirmInd;
+
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -445,9 +450,8 @@
* Ensures that an instance of the IWifiNanIface proxy object is
* successfully created.
*/
-TEST(WifiNanIfaceHidlTestNoFixture, Create) {
- ASSERT_NE(nullptr, getWifiNanIface().get());
- stopWifi();
+TEST_P(WifiNanIfaceHidlTest, Create) {
+ // The creation of a proxy object is tested as part of the SetUp method.
}
/*
@@ -455,41 +459,51 @@
* Ensure that API calls fail with ERROR_WIFI_IFACE_INVALID when using an interface once wifi
* is disabled.
*/
-TEST(WifiNanIfaceHidlTestNoFixture, FailOnIfaceInvalid) {
- android::sp<IWifiNanIface> iwifiNanIface = getWifiNanIface();
- ASSERT_NE(nullptr, iwifiNanIface.get());
- stopWifi();
- sleep(5); // make sure that all chips/interfaces are invalidated
- ASSERT_EQ(WifiStatusCode::ERROR_WIFI_IFACE_INVALID,
- HIDL_INVOKE(iwifiNanIface, getCapabilitiesRequest, 0).code);
+TEST_P(WifiNanIfaceHidlTest, FailOnIfaceInvalid) {
+ stopWifi(GetInstanceName());
+ android::sp<IWifiNanIface> iwifiNanIface =
+ getWifiNanIface(GetInstanceName());
+ ASSERT_NE(nullptr, iwifiNanIface.get());
+ stopWifi(GetInstanceName());
+ sleep(5); // make sure that all chips/interfaces are invalidated
+ ASSERT_EQ(WifiStatusCode::ERROR_WIFI_IFACE_INVALID,
+ HIDL_INVOKE(iwifiNanIface, getCapabilitiesRequest, 0).code);
}
/*
* getCapabilitiesRequest: validate that it returns capabilities.
*/
-TEST_F(WifiNanIfaceHidlTest, getCapabilitiesRequest) {
- uint16_t inputCmdId = 10;
- callbackType = INVALID;
- ASSERT_EQ(WifiStatusCode::SUCCESS,
+TEST_P(WifiNanIfaceHidlTest, getCapabilitiesRequest) {
+ uint16_t inputCmdId = 10;
+ callbackType = INVALID;
+ ASSERT_EQ(
+ WifiStatusCode::SUCCESS,
HIDL_INVOKE(iwifiNanIface, getCapabilitiesRequest, inputCmdId).code);
- // wait for a callback
- ASSERT_EQ(std::cv_status::no_timeout, wait(NOTIFY_CAPABILITIES_RESPONSE));
- ASSERT_EQ(NOTIFY_CAPABILITIES_RESPONSE, callbackType);
- ASSERT_EQ(id, inputCmdId);
+ // wait for a callback
+ ASSERT_EQ(std::cv_status::no_timeout, wait(NOTIFY_CAPABILITIES_RESPONSE));
+ ASSERT_EQ(NOTIFY_CAPABILITIES_RESPONSE, callbackType);
+ ASSERT_EQ(id, inputCmdId);
- // check for reasonable capability values
- EXPECT_GT(capabilities.maxConcurrentClusters, (unsigned int) 0);
- EXPECT_GT(capabilities.maxPublishes, (unsigned int) 0);
- EXPECT_GT(capabilities.maxSubscribes, (unsigned int) 0);
- EXPECT_EQ(capabilities.maxServiceNameLen, (unsigned int) 255);
- EXPECT_EQ(capabilities.maxMatchFilterLen, (unsigned int) 255);
- EXPECT_GT(capabilities.maxTotalMatchFilterLen, (unsigned int) 255);
- EXPECT_EQ(capabilities.maxServiceSpecificInfoLen, (unsigned int) 255);
- EXPECT_GE(capabilities.maxExtendedServiceSpecificInfoLen, (unsigned int) 255);
- EXPECT_GT(capabilities.maxNdiInterfaces, (unsigned int) 0);
- EXPECT_GT(capabilities.maxNdpSessions, (unsigned int) 0);
- EXPECT_GT(capabilities.maxAppInfoLen, (unsigned int) 0);
- EXPECT_GT(capabilities.maxQueuedTransmitFollowupMsgs, (unsigned int) 0);
- EXPECT_GT(capabilities.maxSubscribeInterfaceAddresses, (unsigned int) 0);
- EXPECT_NE(capabilities.supportedCipherSuites, (unsigned int) 0);
+ // check for reasonable capability values
+ EXPECT_GT(capabilities.maxConcurrentClusters, (unsigned int)0);
+ EXPECT_GT(capabilities.maxPublishes, (unsigned int)0);
+ EXPECT_GT(capabilities.maxSubscribes, (unsigned int)0);
+ EXPECT_EQ(capabilities.maxServiceNameLen, (unsigned int)255);
+ EXPECT_EQ(capabilities.maxMatchFilterLen, (unsigned int)255);
+ EXPECT_GT(capabilities.maxTotalMatchFilterLen, (unsigned int)255);
+ EXPECT_EQ(capabilities.maxServiceSpecificInfoLen, (unsigned int)255);
+ EXPECT_GE(capabilities.maxExtendedServiceSpecificInfoLen,
+ (unsigned int)255);
+ EXPECT_GT(capabilities.maxNdiInterfaces, (unsigned int)0);
+ EXPECT_GT(capabilities.maxNdpSessions, (unsigned int)0);
+ EXPECT_GT(capabilities.maxAppInfoLen, (unsigned int)0);
+ EXPECT_GT(capabilities.maxQueuedTransmitFollowupMsgs, (unsigned int)0);
+ EXPECT_GT(capabilities.maxSubscribeInterfaceAddresses, (unsigned int)0);
+ EXPECT_NE(capabilities.supportedCipherSuites, (unsigned int)0);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiNanIfaceHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
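
The NAN file above is the template for every test file that follows: the fixture switches from VtsHalHidlTargetTestBase to TestWithParam<std::string>, TEST/TEST_F become TEST_P, the *NoFixture Create tests collapse into the fixture (SetUp already proves creation), and a per-instance instantiation is appended. The generic shape, reduced to a sketch (FooHidlTest is a placeholder name):

#include <gtest/gtest.h>
#include <hidl/GtestPrinter.h>
#include <hidl/ServiceManagement.h>
// Assumes the wifi test utils (stopWifi, IWifi) are in scope, as above.
class FooHidlTest : public ::testing::TestWithParam<std::string> {
   public:
    virtual void SetUp() override { /* acquire the HAL via GetParam() */ }
    virtual void TearDown() override { stopWifi(GetParam()); }
};

TEST_P(FooHidlTest, Create) {
    // Creation is already exercised by SetUp.
}

INSTANTIATE_TEST_SUITE_P(
    PerInstance, FooHidlTest,
    testing::ValuesIn(
        android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
    android::hardware::PrintInstanceNameToString);
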
diff --git a/wifi/1.0/vts/functional/wifi_p2p_iface_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_p2p_iface_hidl_test.cpp
index 269eb6c..8f33271 100644
--- a/wifi/1.0/vts/functional/wifi_p2p_iface_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_p2p_iface_hidl_test.cpp
@@ -16,25 +16,29 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiP2pIface.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_test_utils.h"
-using ::android::hardware::wifi::V1_0::IWifiP2pIface;
using ::android::sp;
+using ::android::hardware::wifi::V1_0::IWifi;
+using ::android::hardware::wifi::V1_0::IWifiP2pIface;
/**
* Fixture to use for all P2P Iface HIDL interface tests.
*/
-class WifiP2pIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiP2pIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -42,7 +46,13 @@
* Ensures that an instance of the IWifiP2pIface proxy object is
* successfully created.
*/
-TEST(WifiP2pIfaceHidlTestNoFixture, Create) {
- EXPECT_NE(nullptr, getWifiP2pIface().get());
- stopWifi();
+TEST_P(WifiP2pIfaceHidlTest, Create) {
+ stopWifi(GetInstanceName());
+ EXPECT_NE(nullptr, getWifiP2pIface(GetInstanceName()).get());
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiP2pIfaceHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_rtt_controller_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_rtt_controller_hidl_test.cpp
index e13086d..e1ee34f 100644
--- a/wifi/1.0/vts/functional/wifi_rtt_controller_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_rtt_controller_hidl_test.cpp
@@ -16,25 +16,29 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiRttController.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_test_utils.h"
-using ::android::hardware::wifi::V1_0::IWifiRttController;
using ::android::sp;
+using ::android::hardware::wifi::V1_0::IWifi;
+using ::android::hardware::wifi::V1_0::IWifiRttController;
/**
* Fixture to use for all RTT controller HIDL interface tests.
*/
-class WifiRttControllerHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiRttControllerHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -42,7 +46,13 @@
* Ensures that an instance of the IWifiRttController proxy object is
* successfully created.
*/
-TEST(WifiRttControllerHidlTestNoFixture, Create) {
- EXPECT_NE(nullptr, getWifiRttController().get());
- stopWifi();
+TEST_P(WifiRttControllerHidlTest, Create) {
+ stopWifi(GetInstanceName());
+ EXPECT_NE(nullptr, getWifiRttController(GetInstanceName()).get());
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiRttControllerHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.0/vts/functional/wifi_sta_iface_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_sta_iface_hidl_test.cpp
index a413863..30b6fba 100644
--- a/wifi/1.0/vts/functional/wifi_sta_iface_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_sta_iface_hidl_test.cpp
@@ -16,10 +16,12 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/1.0/IWifiStaIface.h>
#include <android/hardware/wifi/1.3/IWifiStaIface.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -28,6 +30,7 @@
using ::android::hardware::wifi::V1_0::Bssid;
using ::android::hardware::wifi::V1_0::CommandId;
using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifi;
using ::android::hardware::wifi::V1_0::IWifiStaIface;
using ::android::hardware::wifi::V1_0::Rssi;
using ::android::hardware::wifi::V1_0::Ssid;
@@ -41,14 +44,14 @@
/**
* Fixture to use for all STA Iface HIDL interface tests.
*/
-class WifiStaIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiStaIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_sta_iface_ = getWifiStaIface();
+ wifi_sta_iface_ = getWifiStaIface(GetInstanceName());
ASSERT_NE(nullptr, wifi_sta_iface_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
bool isCapabilitySupported(IWifiStaIface::StaIfaceCapabilityMask cap_mask) {
@@ -59,6 +62,7 @@
}
sp<IWifiStaIface> wifi_sta_iface_;
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -66,15 +70,14 @@
* Ensures that an instance of the IWifiStaIface proxy object is
* successfully created.
*/
-TEST(WifiStaIfaceHidlTestNoFixture, Create) {
- EXPECT_NE(nullptr, getWifiStaIface().get());
- stopWifi();
+TEST_P(WifiStaIfaceHidlTest, Create) {
+ // The creation of a proxy object is tested as part of the SetUp method.
}
/*
* GetCapabilities:
*/
-TEST_F(WifiStaIfaceHidlTest, GetCapabilities) {
+TEST_P(WifiStaIfaceHidlTest, GetCapabilities) {
const auto& status_and_caps = HIDL_INVOKE(wifi_sta_iface_, getCapabilities);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_caps.first.code);
EXPECT_GT(status_and_caps.second, 0u);
@@ -84,7 +87,7 @@
* GetType:
* Ensures that the correct interface type is returned for station interface.
*/
-TEST_F(WifiStaIfaceHidlTest, GetType) {
+TEST_P(WifiStaIfaceHidlTest, GetType) {
const auto& status_and_type = HIDL_INVOKE(wifi_sta_iface_, getType);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_type.first.code);
EXPECT_EQ(IfaceType::STA, status_and_type.second);
@@ -94,7 +97,7 @@
* GetApfPacketFilterCapabilities:
* Ensures that we can retrieve APF packet filter capabilities.
*/
-TEST_F(WifiStaIfaceHidlTest, GetApfPacketFilterCapabilities) {
+TEST_P(WifiStaIfaceHidlTest, GetApfPacketFilterCapabilities) {
if (!isCapabilitySupported(IWifiStaIface::StaIfaceCapabilityMask::APF)) {
// No-op if APF packet filter is not supported.
return;
@@ -109,7 +112,7 @@
* GetBackgroundScanCapabilities:
* Ensures that we can retrieve background scan capabilities.
*/
-TEST_F(WifiStaIfaceHidlTest, GetBackgroundScanCapabilities) {
+TEST_P(WifiStaIfaceHidlTest, GetBackgroundScanCapabilities) {
if (!isCapabilitySupported(
IWifiStaIface::StaIfaceCapabilityMask::BACKGROUND_SCAN)) {
// No-op if background scan is not supported.
@@ -125,7 +128,7 @@
* GetValidFrequenciesForBand:
* Ensures that we can retrieve valid frequencies for 2.4 GHz band.
*/
-TEST_F(WifiStaIfaceHidlTest, GetValidFrequenciesForBand) {
+TEST_P(WifiStaIfaceHidlTest, GetValidFrequenciesForBand) {
const auto& status_and_freqs = HIDL_INVOKE(
wifi_sta_iface_, getValidFrequenciesForBand, WifiBand::BAND_24GHZ);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_freqs.first.code);
@@ -137,7 +140,7 @@
* Ensures that calls to enable, disable, and retrieve link layer stats
* will return a success status code.
*/
-TEST_F(WifiStaIfaceHidlTest, LinkLayerStatsCollection) {
+TEST_P(WifiStaIfaceHidlTest, LinkLayerStatsCollection) {
if (!isCapabilitySupported(
IWifiStaIface::StaIfaceCapabilityMask::LINK_LAYER_STATS)) {
// No-op if link layer stats is not supported.
@@ -172,7 +175,7 @@
* Ensures that calls to disable RSSI monitoring will return an error status
* code if RSSI monitoring is not enabled.
*/
-TEST_F(WifiStaIfaceHidlTest, RSSIMonitoring) {
+TEST_P(WifiStaIfaceHidlTest, RSSIMonitoring) {
if (!isCapabilitySupported(
IWifiStaIface::StaIfaceCapabilityMask::RSSI_MONITOR)) {
// No-op if RSSI monitor is not supported.
@@ -197,7 +200,7 @@
* Ensures that calls to configure and enable roaming will return a success
* status code.
*/
-TEST_F(WifiStaIfaceHidlTest, RoamingControl) {
+TEST_P(WifiStaIfaceHidlTest, RoamingControl) {
if (!isCapabilitySupported(
IWifiStaIface::StaIfaceCapabilityMask::CONTROL_ROAMING)) {
// No-op if roaming control is not supported.
@@ -242,9 +245,9 @@
* Ensures that calls to enable neighbor discovery offload will return a success
* status code.
*/
-TEST_F(WifiStaIfaceHidlTest, EnableNDOffload) {
- if (!isCapabilitySupported(
- IWifiStaIface::StaIfaceCapabilityMask::ND_OFFLOAD)) {
+TEST_P(WifiStaIfaceHidlTest, EnableNDOffload) {
+ if (!isCapabilitySupported(
+ IWifiStaIface::StaIfaceCapabilityMask::ND_OFFLOAD)) {
// No-op if nd offload is not supported.
return;
}
@@ -257,7 +260,7 @@
* Ensures that calls to set scanning MAC OUI will return a success status
* code.
*/
-TEST_F(WifiStaIfaceHidlTest, SetScanningMacOui) {
+TEST_P(WifiStaIfaceHidlTest, SetScanningMacOui) {
if (!isCapabilitySupported(
IWifiStaIface::StaIfaceCapabilityMask::SCAN_RAND)) {
// No-op if SetScanningMacOui is not supported.
@@ -274,9 +277,9 @@
* Ensures that calls to start packet fate monitoring and retrieve TX/RX
* packets will return a success status code.
*/
-TEST_F(WifiStaIfaceHidlTest, PacketFateMonitoring) {
- if (!isCapabilitySupported(
- IWifiStaIface::StaIfaceCapabilityMask::DEBUG_PACKET_FATE)) {
+TEST_P(WifiStaIfaceHidlTest, PacketFateMonitoring) {
+ if (!isCapabilitySupported(
+ IWifiStaIface::StaIfaceCapabilityMask::DEBUG_PACKET_FATE)) {
// No-op if packet fate monitor is not supported.
return;
}
@@ -291,3 +294,9 @@
EXPECT_EQ(WifiStatusCode::SUCCESS,
HIDL_INVOKE(wifi_sta_iface_, getDebugRxPacketFates).first.code);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiStaIfaceHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
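
A reading aid for the assertions above: HIDL_INVOKE (from wifi_hidl_call_util.h) turns a callback-style HIDL method into a synchronous call, returning a bare WifiStatus for status-only methods and a std::pair of {status, payload} otherwise — hence the .first.code / .second pattern throughout. Typical shapes, drawn from the tests above:

// Status-only method:
EXPECT_EQ(WifiStatusCode::SUCCESS,
          HIDL_INVOKE(wifi_sta_iface_, disableLinkLayerStatsCollection).code);
// Method with a payload:
const auto& status_and_caps = HIDL_INVOKE(wifi_sta_iface_, getCapabilities);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_caps.first.code);
EXPECT_GT(status_and_caps.second, 0u);
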
diff --git a/wifi/1.1/vts/functional/Android.bp b/wifi/1.1/vts/functional/Android.bp
index 6662314..775031e 100644
--- a/wifi/1.1/vts/functional/Android.bp
+++ b/wifi/1.1/vts/functional/Android.bp
@@ -26,6 +26,7 @@
"android.hardware.wifi@1.1",
"android.hardware.wifi@1.2",
"android.hardware.wifi@1.3",
+ "libwifi-system-iface"
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp b/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp
index 673fed3..4b62b15 100644
--- a/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp
+++ b/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp
@@ -14,34 +14,8 @@
* limitations under the License.
*/
-#include <android-base/logging.h>
-#include <android/hardware/wifi/1.1/IWifi.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
-#include "wifi_hidl_test_utils.h"
-
-class WifiHidlEnvironment_1_1 : public WifiHidlEnvironment {
- public:
- // get the test environment singleton
- static WifiHidlEnvironment_1_1* Instance() {
- static WifiHidlEnvironment_1_1* instance = new WifiHidlEnvironment_1_1;
- return instance;
- }
-
- virtual void registerTestServices() override {
- registerTestService<android::hardware::wifi::V1_1::IWifi>();
- }
-
- private:
- WifiHidlEnvironment_1_1() {}
-};
-
-WifiHidlEnvironment* gEnv = WifiHidlEnvironment_1_1::Instance();
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(gEnv);
- ::testing::InitGoogleTest(&argc, argv);
- gEnv->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+// TODO(b/143892896): Remove this file after wifi_hidl_test_utils.cpp is
+// updated.
+::testing::VtsHalHidlTargetTestEnvBase* gEnv = nullptr;
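
The environment boilerplate deleted above is replaced by a one-line stub rather than removed outright because wifi_hidl_test_utils.cpp (see the extern WifiHidlEnvironment* gEnv near the top of this patch) has not been migrated yet and still references the symbol; under the usual C++ ABI, data symbols carry no type information, so this definition satisfies that reference at link time despite the differing pointer type. The same stopgap repeats in the 1.2, 1.3, 1.4, and supplicant target-test files below until b/143892896 retires gEnv from the utils.
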
diff --git a/wifi/1.1/vts/functional/wifi_chip_hidl_test.cpp b/wifi/1.1/vts/functional/wifi_chip_hidl_test.cpp
index 6323547..08de240 100644
--- a/wifi/1.1/vts/functional/wifi_chip_hidl_test.cpp
+++ b/wifi/1.1/vts/functional/wifi_chip_hidl_test.cpp
@@ -19,8 +19,9 @@
#include <android/hardware/wifi/1.1/IWifi.h>
#include <android/hardware/wifi/1.1/IWifiChip.h>
#include <android/hardware/wifi/1.3/IWifiChip.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -45,14 +46,14 @@
/**
* Fixture to use for all Wifi chip HIDL interface tests.
*/
-class WifiChipHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiChipHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_chip_ = IWifiChip::castFrom(getWifiChip());
+ wifi_chip_ = IWifiChip::castFrom(getWifiChip(GetInstanceName()));
ASSERT_NE(nullptr, wifi_chip_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
uint32_t configureChipForStaIfaceAndGetCapabilities() {
@@ -77,12 +78,15 @@
}
sp<IWifiChip> wifi_chip_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
* SelectTxPowerScenario
*/
-TEST_F(WifiChipHidlTest, SelectTxPowerScenario) {
+TEST_P(WifiChipHidlTest, SelectTxPowerScenario) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
HIDL_INVOKE(wifi_chip_, selectTxPowerScenario, kFakePowerScenario);
@@ -96,7 +100,7 @@
/*
* ResetTxPowerScenario
*/
-TEST_F(WifiChipHidlTest, ResetTxPowerScenario) {
+TEST_P(WifiChipHidlTest, ResetTxPowerScenario) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
HIDL_INVOKE(wifi_chip_, resetTxPowerScenario);
@@ -106,3 +110,9 @@
EXPECT_EQ(WifiStatusCode::ERROR_NOT_SUPPORTED, status.code);
}
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiChipHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.2/vts/functional/Android.bp b/wifi/1.2/vts/functional/Android.bp
index b2956ce..f43e49e 100644
--- a/wifi/1.2/vts/functional/Android.bp
+++ b/wifi/1.2/vts/functional/Android.bp
@@ -28,8 +28,10 @@
"android.hardware.wifi@1.1",
"android.hardware.wifi@1.2",
"android.hardware.wifi@1.3",
+ "libwifi-system-iface"
],
- test_suites: ["general-tests"],
+ disable_framework: true,
+ test_suites: ["general-tests", "vts-core"],
}
cc_test {
@@ -44,6 +46,8 @@
"android.hardware.wifi@1.0",
"android.hardware.wifi@1.1",
"android.hardware.wifi@1.2",
+ "libwifi-system-iface"
],
- test_suites: ["general-tests"],
+ disable_framework: true,
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/wifi/1.2/vts/functional/VtsHalWifiV1_2TargetTest.cpp b/wifi/1.2/vts/functional/VtsHalWifiV1_2TargetTest.cpp
index c765cdc..52c7a4a 100644
--- a/wifi/1.2/vts/functional/VtsHalWifiV1_2TargetTest.cpp
+++ b/wifi/1.2/vts/functional/VtsHalWifiV1_2TargetTest.cpp
@@ -14,35 +14,8 @@
* limitations under the License.
*/
-#include <android-base/logging.h>
-#include <android/hardware/wifi/1.2/IWifi.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
-#include "wifi_hidl_test_utils.h"
-
-using ::android::hardware::wifi::V1_2::IWifi;
-
-// Test environment for Wifi HIDL HAL.
-class WifiHidlEnvironment_1_2 : public WifiHidlEnvironment {
- public:
- // get the test environment singleton
- static WifiHidlEnvironment_1_2* Instance() {
- static WifiHidlEnvironment_1_2* instance = new WifiHidlEnvironment_1_2;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IWifi>(); }
-
- private:
- WifiHidlEnvironment_1_2() {}
-};
-
-WifiHidlEnvironment_1_2* gEnv = WifiHidlEnvironment_1_2::Instance();
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(gEnv);
- ::testing::InitGoogleTest(&argc, argv);
- gEnv->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+// TODO(b/143892896): Remove this file after wifi_hidl_test_utils.cpp is
+// updated.
+::testing::VtsHalHidlTargetTestEnvBase* gEnv = nullptr;
\ No newline at end of file
diff --git a/wifi/1.2/vts/functional/wifi_chip_hidl_test.cpp b/wifi/1.2/vts/functional/wifi_chip_hidl_test.cpp
index 9d567fe..47faec8 100644
--- a/wifi/1.2/vts/functional/wifi_chip_hidl_test.cpp
+++ b/wifi/1.2/vts/functional/wifi_chip_hidl_test.cpp
@@ -16,12 +16,14 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.2/IWifi.h>
#include <android/hardware/wifi/1.2/IWifiChip.h>
#include <android/hardware/wifi/1.2/IWifiChipEventCallback.h>
#include <android/hardware/wifi/1.3/IWifiChip.h>
-
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <VtsHalHidlTargetCallbackBase.h>
-#include <VtsHalHidlTargetTestBase.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -50,14 +52,14 @@
/**
* Fixture to use for all Wifi chip HIDL interface tests.
*/
-class WifiChipHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiChipHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_chip_ = IWifiChip::castFrom(getWifiChip());
+ wifi_chip_ = IWifiChip::castFrom(getWifiChip(GetInstanceName()));
ASSERT_NE(nullptr, wifi_chip_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
// A simple test implementation of WifiChipEventCallback.
class WifiChipEventCallback
@@ -123,6 +125,9 @@
}
sp<IWifiChip> wifi_chip_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -130,7 +135,7 @@
* This test case tests the selectTxPowerScenario_1_2() API with SAR scenarios
* newly defined in 1.2
*/
-TEST_F(WifiChipHidlTest, SelectTxPowerScenario_1_2_body) {
+TEST_P(WifiChipHidlTest, SelectTxPowerScenario_1_2_body) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
HIDL_INVOKE(wifi_chip_, selectTxPowerScenario_1_2, kPowerScenarioBody);
@@ -147,7 +152,7 @@
* This test case tests the selectTxPowerScenario_1_2() API with previously
* defined SAR scenarios
*/
-TEST_F(WifiChipHidlTest, SelectTxPowerScenario_1_2_voiceCall) {
+TEST_P(WifiChipHidlTest, SelectTxPowerScenario_1_2_voiceCall) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
HIDL_INVOKE(wifi_chip_, selectTxPowerScenario_1_2, kPowerScenarioVoiceCall);
@@ -167,9 +172,15 @@
* since the event is triggered internally in the HAL implementation and cannot
* be triggered from the test case
*/
-TEST_F(WifiChipHidlTest, registerEventCallback_1_2) {
+TEST_P(WifiChipHidlTest, registerEventCallback_1_2) {
sp<WifiChipEventCallback> wifiChipEventCallback = new WifiChipEventCallback();
const auto& status =
HIDL_INVOKE(wifi_chip_, registerEventCallback_1_2, wifiChipEventCallback);
EXPECT_EQ(WifiStatusCode::SUCCESS, status.code);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiChipHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::wifi::V1_2::IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.2/vts/functional/wifi_nan_iface_hidl_test.cpp b/wifi/1.2/vts/functional/wifi_nan_iface_hidl_test.cpp
index 4dbc82b..f3f76e1 100644
--- a/wifi/1.2/vts/functional/wifi_nan_iface_hidl_test.cpp
+++ b/wifi/1.2/vts/functional/wifi_nan_iface_hidl_test.cpp
@@ -16,10 +16,12 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.2/IWifi.h>
#include <android/hardware/wifi/1.2/IWifiNanIface.h>
#include <android/hardware/wifi/1.2/IWifiNanIfaceEventCallback.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -36,19 +38,19 @@
#define TIMEOUT_PERIOD 10
-android::sp<android::hardware::wifi::V1_2::IWifiNanIface>
-getWifiNanIface_1_2() {
+android::sp<android::hardware::wifi::V1_2::IWifiNanIface> getWifiNanIface_1_2(
+ const std::string& instance_name) {
return android::hardware::wifi::V1_2::IWifiNanIface::castFrom(
- getWifiNanIface());
+ getWifiNanIface(instance_name));
}
/**
* Fixture to use for all NAN Iface HIDL interface tests.
*/
-class WifiNanIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiNanIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- iwifiNanIface = getWifiNanIface_1_2();
+ iwifiNanIface = getWifiNanIface_1_2(GetInstanceName());
ASSERT_NE(nullptr, iwifiNanIface.get());
ASSERT_EQ(WifiStatusCode::SUCCESS,
HIDL_INVOKE(iwifiNanIface, registerEventCallback_1_2,
@@ -56,7 +58,7 @@
.code);
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
/* Used as a mechanism to inform the test about data/event callback */
inline void notify() {
@@ -458,6 +460,8 @@
::android::hardware::wifi::V1_2::NanDataPathConfirmInd
nanDataPathConfirmInd_1_2;
NanDataPathScheduleUpdateInd nanDataPathScheduleUpdateInd;
+
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -465,15 +469,14 @@
* Ensures that an instance of the IWifiNanIface proxy object is
* successfully created.
*/
-TEST(WifiNanIfaceHidlTestNoFixture, Create) {
- ASSERT_NE(nullptr, getWifiNanIface_1_2().get());
- stopWifi();
+TEST_P(WifiNanIfaceHidlTest, Create) {
+ // The creation of a proxy object is tested as part of the SetUp method.
}
/*
* enableRequest_1_2InvalidArgs: validate that it fails with invalid arguments
*/
-TEST_F(WifiNanIfaceHidlTest, enableRequest_1_2InvalidArgs) {
+TEST_P(WifiNanIfaceHidlTest, enableRequest_1_2InvalidArgs) {
uint16_t inputCmdId = 10;
callbackType = INVALID;
NanEnableRequest nanEnableRequest = {};
@@ -493,7 +496,7 @@
* enableRequest_1_2ShimInvalidArgs: validate that it fails with invalid
* arguments to the shim
*/
-TEST_F(WifiNanIfaceHidlTest, enableRequest_1_2ShimInvalidArgs) {
+TEST_P(WifiNanIfaceHidlTest, enableRequest_1_2ShimInvalidArgs) {
uint16_t inputCmdId = 10;
NanEnableRequest nanEnableRequest = {};
nanEnableRequest.configParams.numberOfPublishServiceIdsInBeacon =
@@ -508,7 +511,7 @@
/*
* configRequest_1_2InvalidArgs: validate that it fails with invalid arguments
*/
-TEST_F(WifiNanIfaceHidlTest, configRequest_1_2InvalidArgs) {
+TEST_P(WifiNanIfaceHidlTest, configRequest_1_2InvalidArgs) {
uint16_t inputCmdId = 10;
callbackType = INVALID;
NanConfigRequest nanConfigRequest = {};
@@ -528,7 +531,7 @@
* configRequest_1_2ShimInvalidArgs: validate that it fails with invalid
* arguments to the shim
*/
-TEST_F(WifiNanIfaceHidlTest, configRequest_1_2ShimInvalidArgs) {
+TEST_P(WifiNanIfaceHidlTest, configRequest_1_2ShimInvalidArgs) {
uint16_t inputCmdId = 10;
NanConfigRequest nanConfigRequest = {};
nanConfigRequest.numberOfPublishServiceIdsInBeacon = 128; // must be <= 127
@@ -538,3 +541,9 @@
nanConfigRequest, nanConfigRequestSupp)
.code);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiNanIfaceHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::wifi::V1_2::IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
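
getWifiNanIface_1_2() above is the standard version-upgrade idiom in these tests: fetch the 1.0 proxy by instance name, then castFrom() to the newer interface. A sketch of the caller-side behavior (castFrom() yields nullptr when the running service only implements an older version):

android::sp<android::hardware::wifi::V1_2::IWifiNanIface> iface =
    android::hardware::wifi::V1_2::IWifiNanIface::castFrom(
        getWifiNanIface(instance_name));
if (!iface.get()) {
    // The service predates 1.2; 1.2-only expectations don't apply.
}
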
diff --git a/wifi/1.2/vts/functional/wifi_sta_iface_hidl_test.cpp b/wifi/1.2/vts/functional/wifi_sta_iface_hidl_test.cpp
index 92f5d14..1b907b2 100644
--- a/wifi/1.2/vts/functional/wifi_sta_iface_hidl_test.cpp
+++ b/wifi/1.2/vts/functional/wifi_sta_iface_hidl_test.cpp
@@ -19,9 +19,11 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.2/IWifi.h>
#include <android/hardware/wifi/1.2/IWifiStaIface.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -34,14 +36,15 @@
/**
* Fixture to use for all STA Iface HIDL interface tests.
*/
-class WifiStaIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiStaIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_sta_iface_ = IWifiStaIface::castFrom(getWifiStaIface());
+ wifi_sta_iface_ =
+ IWifiStaIface::castFrom(getWifiStaIface(GetInstanceName()));
ASSERT_NE(nullptr, wifi_sta_iface_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
bool isCapabilitySupported(IWifiStaIface::StaIfaceCapabilityMask cap_mask) {
@@ -52,6 +55,9 @@
}
sp<IWifiStaIface> wifi_sta_iface_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -59,7 +65,7 @@
* Ensures that calls to set MAC address will return a success status
* code.
*/
-TEST_F(WifiStaIfaceHidlTest, SetMacAddress) {
+TEST_P(WifiStaIfaceHidlTest, SetMacAddress) {
const android::hardware::hidl_array<uint8_t, 6> kMac{
std::array<uint8_t, 6>{{0x12, 0x22, 0x33, 0x52, 0x10, 0x41}}};
EXPECT_EQ(WifiStatusCode::SUCCESS,
@@ -76,7 +82,7 @@
* TODO: We can't execute APF opcodes from this test because there's no way
* to loop test packets through the wifi firmware (b/73804303#comment29).
*/
-TEST_F(WifiStaIfaceHidlTest, DISABLED_ReadApfPacketFilterData) {
+TEST_P(WifiStaIfaceHidlTest, DISABLED_ReadApfPacketFilterData) {
if (!isCapabilitySupported(IWifiStaIface::StaIfaceCapabilityMask::APF)) {
// Disable test if APF packet filter is not supported.
LOG(WARNING) << "TEST SKIPPED: APF packet filtering not supported";
@@ -107,3 +113,9 @@
EXPECT_EQ(status_and_data.second, data);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiStaIfaceHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::wifi::V1_2::IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.3/vts/functional/Android.bp b/wifi/1.3/vts/functional/Android.bp
index 53c8f08..fe9c791 100644
--- a/wifi/1.3/vts/functional/Android.bp
+++ b/wifi/1.3/vts/functional/Android.bp
@@ -28,5 +28,8 @@
"android.hardware.wifi@1.1",
"android.hardware.wifi@1.2",
"android.hardware.wifi@1.3",
+ "libwifi-system-iface"
],
+ disable_framework: true,
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/wifi/1.3/vts/functional/VtsHalWifiV1_3TargetTest.cpp b/wifi/1.3/vts/functional/VtsHalWifiV1_3TargetTest.cpp
index faf426e..52c7a4a 100644
--- a/wifi/1.3/vts/functional/VtsHalWifiV1_3TargetTest.cpp
+++ b/wifi/1.3/vts/functional/VtsHalWifiV1_3TargetTest.cpp
@@ -14,37 +14,8 @@
* limitations under the License.
*/
-#include <android-base/logging.h>
-#include <android/hardware/wifi/1.3/IWifi.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
-#include "wifi_hidl_test_utils.h"
-
-using ::android::hardware::wifi::V1_3::IWifi;
-
-// Test environment for Wifi HIDL HAL.
-class WifiHidlEnvironment_1_3 : public WifiHidlEnvironment {
- public:
- // get the test environment singleton
- static WifiHidlEnvironment_1_3* Instance() {
- static WifiHidlEnvironment_1_3* instance = new WifiHidlEnvironment_1_3;
- return instance;
- }
-
- virtual void registerTestServices() override {
- registerTestService<android::hardware::wifi::V1_3::IWifi>();
- }
-
- private:
- WifiHidlEnvironment_1_3() {}
-};
-
-WifiHidlEnvironment_1_3* gEnv = WifiHidlEnvironment_1_3::Instance();
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(gEnv);
- ::testing::InitGoogleTest(&argc, argv);
- gEnv->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+// TODO(b/143892896): Remove this file after wifi_hidl_test_utils.cpp is
+// updated.
+::testing::VtsHalHidlTargetTestEnvBase* gEnv = nullptr;
\ No newline at end of file
diff --git a/wifi/1.3/vts/functional/wifi_chip_hidl_test.cpp b/wifi/1.3/vts/functional/wifi_chip_hidl_test.cpp
index d980fcb..db93967 100644
--- a/wifi/1.3/vts/functional/wifi_chip_hidl_test.cpp
+++ b/wifi/1.3/vts/functional/wifi_chip_hidl_test.cpp
@@ -16,9 +16,11 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.3/IWifi.h>
#include <android/hardware/wifi/1.3/IWifiChip.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -39,14 +41,14 @@
/**
* Fixture to use for all Wifi chip HIDL interface tests.
*/
-class WifiChipHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiChipHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_chip_ = IWifiChip::castFrom(getWifiChip());
+ wifi_chip_ = IWifiChip::castFrom(getWifiChip(GetInstanceName()));
ASSERT_NE(nullptr, wifi_chip_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
// Helper function to configure the Chip in one of the supported modes.
@@ -70,6 +72,9 @@
}
sp<IWifiChip> wifi_chip_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -77,7 +82,7 @@
* This test case tests the setLatencyMode() API with
* Latency mode NORMAL
*/
-TEST_F(WifiChipHidlTest, SetLatencyMode_normal) {
+TEST_P(WifiChipHidlTest, SetLatencyMode_normal) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
HIDL_INVOKE(wifi_chip_, setLatencyMode, kLatencyModeNormal);
@@ -92,7 +97,7 @@
* SetLatencyMode_low
* This test case tests the setLatencyMode() API with Latency mode LOW
*/
-TEST_F(WifiChipHidlTest, SetLatencyMode_low) {
+TEST_P(WifiChipHidlTest, SetLatencyMode_low) {
uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
HIDL_INVOKE(wifi_chip_, setLatencyMode, kLatencyModeLow);
@@ -106,7 +111,7 @@
/*
* GetCapabilities_1_3
*/
-TEST_F(WifiChipHidlTest, GetCapabilities_1_3) {
+TEST_P(WifiChipHidlTest, GetCapabilities_1_3) {
configureChipForIfaceType(IfaceType::STA, true);
const auto& status_and_caps = HIDL_INVOKE(wifi_chip_, getCapabilities_1_3);
if (status_and_caps.first.code != WifiStatusCode::SUCCESS) {
@@ -116,3 +121,9 @@
}
EXPECT_NE(0u, status_and_caps.second);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiChipHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::wifi::V1_3::IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
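
The latency-mode and capability tests above all follow the capability-gated expectation pattern used across the chip tests: the same call must succeed when the chip advertises the bit and must fail with ERROR_NOT_SUPPORTED when it does not. Reduced sketch (the SET_LATENCY_MODE mask name is assumed from the 1.3 IWifiChip types):

uint32_t caps = configureChipForStaIfaceAndGetCapabilities();
const auto& status =
    HIDL_INVOKE(wifi_chip_, setLatencyMode, kLatencyModeLow);
if (caps & IWifiChip::ChipCapabilityMask::SET_LATENCY_MODE) {
    EXPECT_EQ(WifiStatusCode::SUCCESS, status.code);
} else {
    EXPECT_EQ(WifiStatusCode::ERROR_NOT_SUPPORTED, status.code);
}
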
diff --git a/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp b/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp
index d382f30..c5acc3c 100644
--- a/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp
+++ b/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp
@@ -19,9 +19,11 @@
#include <android-base/logging.h>
+#include <android/hardware/wifi/1.3/IWifi.h>
#include <android/hardware/wifi/1.3/IWifiStaIface.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -35,14 +37,15 @@
/**
* Fixture to use for all STA Iface HIDL interface tests.
*/
-class WifiStaIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiStaIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_sta_iface_ = IWifiStaIface::castFrom(getWifiStaIface());
+ wifi_sta_iface_ =
+ IWifiStaIface::castFrom(getWifiStaIface(GetInstanceName()));
ASSERT_NE(nullptr, wifi_sta_iface_.get());
}
- virtual void TearDown() override { stopWifi(); }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
bool isCapabilitySupported(IWifiStaIface::StaIfaceCapabilityMask cap_mask) {
@@ -53,6 +56,9 @@
}
sp<IWifiStaIface> wifi_sta_iface_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -60,7 +66,7 @@
* Ensures that calls to get factory MAC address will retrieve a non-zero MAC
* and return a success status code.
*/
-TEST_F(WifiStaIfaceHidlTest, GetFactoryMacAddress) {
+TEST_P(WifiStaIfaceHidlTest, GetFactoryMacAddress) {
std::pair<WifiStatus, hidl_array<uint8_t, 6> > status_and_mac =
HIDL_INVOKE(wifi_sta_iface_, getFactoryMacAddress);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_mac.first.code);
@@ -73,7 +79,7 @@
* Ensures that calls to get link layer stats V1_3 will retrieve a non-empty
* StaLinkLayerStats after link layer stats collection is enabled.
*/
-TEST_F(WifiStaIfaceHidlTest, GetLinkLayerStats_1_3) {
+TEST_P(WifiStaIfaceHidlTest, GetLinkLayerStats_1_3) {
if (!isCapabilitySupported(
IWifiStaIface::StaIfaceCapabilityMask::LINK_LAYER_STATS)) {
// No-op if link layer stats is not supported.
@@ -94,3 +100,9 @@
WifiStatusCode::SUCCESS,
HIDL_INVOKE(wifi_sta_iface_, disableLinkLayerStatsCollection).code);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiStaIfaceHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::wifi::V1_3::IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.4/Android.bp b/wifi/1.4/Android.bp
index e197859..5750e42 100644
--- a/wifi/1.4/Android.bp
+++ b/wifi/1.4/Android.bp
@@ -13,6 +13,7 @@
"IWifiChip.hal",
"IWifiRttController.hal",
"IWifiRttControllerEventCallback.hal",
+ "IWifiStaIface.hal",
],
interfaces: [
"android.hardware.wifi@1.0",
diff --git a/wifi/1.4/IWifiStaIface.hal b/wifi/1.4/IWifiStaIface.hal
new file mode 100644
index 0000000..fb658cd
--- /dev/null
+++ b/wifi/1.4/IWifiStaIface.hal
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.wifi@1.4;
+
+import @1.0::WifiStatus;
+import @1.0::MacAddress;
+import @1.0::IWifiStaIface;
+import @1.3::IWifiStaIface;
+
+/**
+ * Interface used to represent a single STA iface.
+ *
+ * IWifiChip.createStaIface() may return a @1.4::IWifiStaIface when supported.
+ */
+interface IWifiStaIface extends @1.3::IWifiStaIface {
+
+ enum StaIfaceCapabilityMask : @1.0::IWifiStaIface.StaIfaceCapabilityMask {
+ STA_6G = 1 << 15
+ };
+
+ /**
+ * Get the capabilities supported by this STA iface.
+ *
+ * @return status WifiStatus of the operation.
+ * Possible status codes:
+ * |WifiStatusCode.SUCCESS|,
+ * |WifiStatusCode.ERROR_WIFI_IFACE_INVALID|,
+ * |WifiStatusCode.ERROR_NOT_AVAILABLE|,
+ * |WifiStatusCode.ERROR_NOT_SUPPORTED|,
+ * |WifiStatusCode.ERROR_UNKNOWN|
+ * @return capabilities Bitset of |StaIfaceCapabilityMask| values.
+ */
+ getCapabilities_1_4()
+ generates (WifiStatus status,
+ bitfield<StaIfaceCapabilityMask> capabilities);
+};
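
A hypothetical client-side probe for the new capability, written in the raw lambda-callback convention that HIDL_INVOKE wraps (sta_iface is assumed to be a sp<android::hardware::wifi::V1_4::IWifiStaIface>; the generated bitfield parameter is a uint32_t):

sta_iface->getCapabilities_1_4(
    [](const WifiStatus& status, uint32_t caps) {
        if (status.code == WifiStatusCode::SUCCESS &&
            (caps & IWifiStaIface::StaIfaceCapabilityMask::STA_6G)) {
            // The STA iface supports operation in the 6GHz band.
        }
    });
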
diff --git a/wifi/1.4/default/hidl_struct_util.cpp b/wifi/1.4/default/hidl_struct_util.cpp
index 61f311e..13a09f3 100644
--- a/wifi/1.4/default/hidl_struct_util.cpp
+++ b/wifi/1.4/default/hidl_struct_util.cpp
@@ -91,7 +91,7 @@
}
IWifiStaIface::StaIfaceCapabilityMask
-convertLegacyFeatureToHidlStaIfaceCapability(uint32_t feature) {
+convertLegacyFeatureToHidlStaIfaceCapability(uint64_t feature) {
using HidlStaIfaceCaps = IWifiStaIface::StaIfaceCapabilityMask;
switch (feature) {
case WIFI_FEATURE_GSCAN:
@@ -120,6 +120,8 @@
return HidlStaIfaceCaps::ND_OFFLOAD;
case WIFI_FEATURE_MKEEP_ALIVE:
return HidlStaIfaceCaps::KEEP_ALIVE;
+ case WIFI_FEATURE_INFRA_6G:
+ return HidlStaIfaceCaps::STA_6G;
};
CHECK(false) << "Unknown legacy feature: " << feature;
return {};
@@ -365,7 +367,7 @@
}
bool convertLegacyFeaturesToHidlStaCapabilities(
- uint32_t legacy_feature_set, uint32_t legacy_logger_feature_set,
+ uint64_t legacy_feature_set, uint32_t legacy_logger_feature_set,
uint32_t* hidl_caps) {
if (!hidl_caps) {
return false;
@@ -384,7 +386,8 @@
WIFI_FEATURE_IE_WHITELIST, WIFI_FEATURE_SCAN_RAND,
WIFI_FEATURE_INFRA_5G, WIFI_FEATURE_HOTSPOT, WIFI_FEATURE_PNO,
WIFI_FEATURE_TDLS, WIFI_FEATURE_TDLS_OFFCHANNEL,
- WIFI_FEATURE_CONFIG_NDO, WIFI_FEATURE_MKEEP_ALIVE}) {
+ WIFI_FEATURE_CONFIG_NDO, WIFI_FEATURE_MKEEP_ALIVE,
+ WIFI_FEATURE_INFRA_6G}) {
if (feature & legacy_feature_set) {
*hidl_caps |= convertLegacyFeatureToHidlStaIfaceCapability(feature);
}
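
The uint32_t to uint64_t widening matters because the legacy feature_set type has outgrown 32 bits (which is also what the static_assert change in wifi_legacy_hal.cpp below pins down); the conversion walks a fixed list of known legacy bits and ORs each match into the 32-bit HIDL mask. Reduced sketch of that flow, with illustrative values:

// Illustrative; the real list and set come from the legacy HAL as above.
uint64_t legacy_feature_set = WIFI_FEATURE_PNO | WIFI_FEATURE_INFRA_6G;
uint32_t hidl_caps = 0;
for (const auto feature : {WIFI_FEATURE_PNO, WIFI_FEATURE_INFRA_6G}) {
    if (feature & legacy_feature_set) {
        hidl_caps |= convertLegacyFeatureToHidlStaIfaceCapability(feature);
    }
}
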
diff --git a/wifi/1.4/default/hidl_struct_util.h b/wifi/1.4/default/hidl_struct_util.h
index a99c1ac..cfaa4ad 100644
--- a/wifi/1.4/default/hidl_struct_util.h
+++ b/wifi/1.4/default/hidl_struct_util.h
@@ -25,6 +25,7 @@
#include <android/hardware/wifi/1.2/types.h>
#include <android/hardware/wifi/1.3/IWifiChip.h>
#include <android/hardware/wifi/1.3/types.h>
+#include <android/hardware/wifi/1.4/IWifiStaIface.h>
#include <android/hardware/wifi/1.4/types.h>
#include "wifi_legacy_hal.h"
@@ -69,7 +70,7 @@
// STA iface conversion methods.
bool convertLegacyFeaturesToHidlStaCapabilities(
- uint32_t legacy_feature_set, uint32_t legacy_logger_feature_set,
+ uint64_t legacy_feature_set, uint32_t legacy_logger_feature_set,
uint32_t* hidl_caps);
bool convertLegacyApfCapabilitiesToHidl(
const legacy_hal::PacketFilterCapabilities& legacy_caps,
diff --git a/wifi/1.4/default/wifi_legacy_hal.cpp b/wifi/1.4/default/wifi_legacy_hal.cpp
index 8139253..ae3c447 100644
--- a/wifi/1.4/default/wifi_legacy_hal.cpp
+++ b/wifi/1.4/default/wifi_legacy_hal.cpp
@@ -479,7 +479,7 @@
-std::pair<wifi_error, uint32_t> WifiLegacyHal::getSupportedFeatureSet(
+std::pair<wifi_error, uint64_t> WifiLegacyHal::getSupportedFeatureSet(
const std::string& iface_name) {
feature_set set;
- static_assert(sizeof(set) == sizeof(uint32_t),
+ static_assert(sizeof(set) == sizeof(uint64_t),
"Some feature_flags can not be represented in output");
wifi_error status = global_func_table_.wifi_get_supported_feature_set(
getIfaceHandle(iface_name), &set);
diff --git a/wifi/1.4/default/wifi_sta_iface.cpp b/wifi/1.4/default/wifi_sta_iface.cpp
index 3e0127e..8e1ada1 100644
--- a/wifi/1.4/default/wifi_sta_iface.cpp
+++ b/wifi/1.4/default/wifi_sta_iface.cpp
@@ -266,6 +266,13 @@
hidl_status_cb);
}
+Return<void> WifiStaIface::getCapabilities_1_4(
+ getCapabilities_1_4_cb hidl_status_cb) {
+ return validateAndCall(this, WifiStatusCode::ERROR_WIFI_IFACE_INVALID,
+ &WifiStaIface::getCapabilitiesInternal_1_4,
+ hidl_status_cb);
+}
+
std::pair<WifiStatus, std::string> WifiStaIface::getNameInternal() {
return {createWifiStatus(WifiStatusCode::SUCCESS), ifname_};
}
@@ -283,26 +290,7 @@
}
std::pair<WifiStatus, uint32_t> WifiStaIface::getCapabilitiesInternal() {
- legacy_hal::wifi_error legacy_status;
- uint32_t legacy_feature_set;
- std::tie(legacy_status, legacy_feature_set) =
- legacy_hal_.lock()->getSupportedFeatureSet(ifname_);
- if (legacy_status != legacy_hal::WIFI_SUCCESS) {
- return {createWifiStatusFromLegacyError(legacy_status), 0};
- }
- uint32_t legacy_logger_feature_set;
- std::tie(legacy_status, legacy_logger_feature_set) =
- legacy_hal_.lock()->getLoggerSupportedFeatureSet(ifname_);
- if (legacy_status != legacy_hal::WIFI_SUCCESS) {
- // some devices don't support querying logger feature set
- legacy_logger_feature_set = 0;
- }
- uint32_t hidl_caps;
- if (!hidl_struct_util::convertLegacyFeaturesToHidlStaCapabilities(
- legacy_feature_set, legacy_logger_feature_set, &hidl_caps)) {
- return {createWifiStatus(WifiStatusCode::ERROR_UNKNOWN), 0};
- }
- return {createWifiStatus(WifiStatusCode::SUCCESS), hidl_caps};
+ return {createWifiStatus(WifiStatusCode::ERROR_NOT_SUPPORTED), 0};
}
std::pair<WifiStatus, StaApfPacketFilterCapabilities>
@@ -640,6 +628,29 @@
return {createWifiStatus(WifiStatusCode::SUCCESS), mac};
}
+std::pair<WifiStatus, uint32_t> WifiStaIface::getCapabilitiesInternal_1_4() {
+ legacy_hal::wifi_error legacy_status;
+ uint64_t legacy_feature_set;
+ std::tie(legacy_status, legacy_feature_set) =
+ legacy_hal_.lock()->getSupportedFeatureSet(ifname_);
+ if (legacy_status != legacy_hal::WIFI_SUCCESS) {
+ return {createWifiStatusFromLegacyError(legacy_status), 0};
+ }
+ uint32_t legacy_logger_feature_set;
+ std::tie(legacy_status, legacy_logger_feature_set) =
+ legacy_hal_.lock()->getLoggerSupportedFeatureSet(ifname_);
+ if (legacy_status != legacy_hal::WIFI_SUCCESS) {
+ // some devices don't support querying logger feature set
+ legacy_logger_feature_set = 0;
+ }
+ uint32_t hidl_caps;
+ if (!hidl_struct_util::convertLegacyFeaturesToHidlStaCapabilities(
+ legacy_feature_set, legacy_logger_feature_set, &hidl_caps)) {
+ return {createWifiStatus(WifiStatusCode::ERROR_UNKNOWN), 0};
+ }
+ return {createWifiStatus(WifiStatusCode::SUCCESS), hidl_caps};
+}
+
} // namespace implementation
} // namespace V1_4
} // namespace wifi
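
Net effect of the wifi_sta_iface.cpp change: the 1.0-era getCapabilities() now reports ERROR_NOT_SUPPORTED, and the 64-bit-aware logic is reachable only through getCapabilities_1_4(). A 1.0 client that sees NOT_SUPPORTED is expected to upgrade its proxy first — sketch:

// iface_1_0 is a sp<android::hardware::wifi::V1_0::IWifiStaIface>.
android::sp<android::hardware::wifi::V1_4::IWifiStaIface> iface_1_4 =
    android::hardware::wifi::V1_4::IWifiStaIface::castFrom(iface_1_0);
if (iface_1_4.get()) {
    iface_1_4->getCapabilities_1_4(
        [](const WifiStatus& status, uint32_t caps) { /* use 1.4 caps */ });
}
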
diff --git a/wifi/1.4/default/wifi_sta_iface.h b/wifi/1.4/default/wifi_sta_iface.h
index d8f7a01..ccf234f 100644
--- a/wifi/1.4/default/wifi_sta_iface.h
+++ b/wifi/1.4/default/wifi_sta_iface.h
@@ -19,7 +19,7 @@
#include <android-base/macros.h>
#include <android/hardware/wifi/1.0/IWifiStaIfaceEventCallback.h>
-#include <android/hardware/wifi/1.3/IWifiStaIface.h>
+#include <android/hardware/wifi/1.4/IWifiStaIface.h>
#include "hidl_callback_util.h"
#include "wifi_iface_util.h"
@@ -35,7 +35,7 @@
/**
* HIDL interface object used to control a STA Iface instance.
*/
-class WifiStaIface : public V1_3::IWifiStaIface {
+class WifiStaIface : public V1_4::IWifiStaIface {
public:
WifiStaIface(const std::string& ifname,
const std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal,
@@ -111,6 +111,8 @@
setMacAddress_cb hidl_status_cb) override;
Return<void> getFactoryMacAddress(
getFactoryMacAddress_cb hidl_status_cb) override;
+ Return<void> getCapabilities_1_4(
+ getCapabilities_1_4_cb hidl_status_cb) override;
private:
// Corresponding worker functions for the HIDL methods.
@@ -159,6 +161,7 @@
WifiStatus setMacAddressInternal(const std::array<uint8_t, 6>& mac);
std::pair<WifiStatus, std::array<uint8_t, 6>>
getFactoryMacAddressInternal();
+ std::pair<WifiStatus, uint32_t> getCapabilitiesInternal_1_4();
std::string ifname_;
std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal_;
diff --git a/wifi/1.4/vts/functional/Android.bp b/wifi/1.4/vts/functional/Android.bp
index 42c60f2..c71b319 100644
--- a/wifi/1.4/vts/functional/Android.bp
+++ b/wifi/1.4/vts/functional/Android.bp
@@ -29,5 +29,7 @@
"android.hardware.wifi@1.2",
"android.hardware.wifi@1.3",
"android.hardware.wifi@1.4",
+ "libwifi-system-iface"
],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp b/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp
index deac0fa..7e0f3cd 100644
--- a/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp
+++ b/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp
@@ -14,37 +14,8 @@
* limitations under the License.
*/
-#include <android-base/logging.h>
-#include <android/hardware/wifi/1.4/IWifi.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
-#include "wifi_hidl_test_utils.h"
-
-using ::android::hardware::wifi::V1_4::IWifi;
-
-// Test environment for Wifi HIDL HAL.
-class WifiHidlEnvironment_1_4 : public WifiHidlEnvironment {
- public:
- // get the test environment singleton
- static WifiHidlEnvironment_1_4* Instance() {
- static WifiHidlEnvironment_1_4* instance = new WifiHidlEnvironment_1_4;
- return instance;
- }
-
- virtual void registerTestServices() override {
- registerTestService<android::hardware::wifi::V1_4::IWifi>();
- }
-
- private:
- WifiHidlEnvironment_1_4() {}
-};
-
-WifiHidlEnvironment_1_4* gEnv = WifiHidlEnvironment_1_4::Instance();
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(gEnv);
- ::testing::InitGoogleTest(&argc, argv);
- gEnv->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+// TODO(b/143892896): Remove this file after wifi_hidl_test_utils.cpp is
+// updated.
+::testing::VtsHalHidlTargetTestEnvBase* gEnv = nullptr;
\ No newline at end of file
diff --git a/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp b/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
index 68e9bbb..017ecb6 100644
--- a/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
+++ b/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
@@ -14,9 +14,11 @@
* limitations under the License.
*/
+#include <android/hardware/wifi/1.4/IWifi.h>
#include <android/hardware/wifi/1.4/IWifiApIface.h>
-
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "wifi_hidl_call_util.h"
#include "wifi_hidl_test_utils.h"
@@ -25,6 +27,7 @@
using ::android::hardware::hidl_array;
using ::android::hardware::wifi::V1_0::WifiStatus;
using ::android::hardware::wifi::V1_0::WifiStatusCode;
+using ::android::hardware::wifi::V1_4::IWifi;
using ::android::hardware::wifi::V1_4::IWifiApIface;
extern WifiHidlEnvironment* gEnv;
@@ -32,19 +35,21 @@
/**
* Fixture to use for all AP Iface HIDL interface tests.
*/
-class WifiApIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class WifiApIfaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- wifi_ap_iface_ = IWifiApIface::castFrom(getWifiApIface());
+ wifi_ap_iface_ =
+ IWifiApIface::castFrom(getWifiApIface(GetInstanceName()));
ASSERT_NE(nullptr, wifi_ap_iface_.get());
}
- virtual void TearDown() override {
- stopWifi();
- }
+ virtual void TearDown() override { stopWifi(GetInstanceName()); }
protected:
sp<IWifiApIface> wifi_ap_iface_;
+
+ private:
+ std::string GetInstanceName() { return GetParam(); }
};
/*
@@ -52,7 +57,7 @@
* Ensures that calls to set MAC address will return a success status
* code.
*/
-TEST_F(WifiApIfaceHidlTest, SetMacAddress) {
+TEST_P(WifiApIfaceHidlTest, SetMacAddress) {
const hidl_array<uint8_t, 6> kMac{{0x12, 0x22, 0x33, 0x52, 0x10, 0x41}};
EXPECT_EQ(WifiStatusCode::SUCCESS,
HIDL_INVOKE(wifi_ap_iface_, setMacAddress, kMac).code);
@@ -63,10 +68,16 @@
* Ensures that calls to get factory MAC address will retrieve a non-zero MAC
* and return a success status code.
*/
-TEST_F(WifiApIfaceHidlTest, GetFactoryMacAddress) {
+TEST_P(WifiApIfaceHidlTest, GetFactoryMacAddress) {
std::pair<WifiStatus, hidl_array<uint8_t, 6> > status_and_mac =
HIDL_INVOKE(wifi_ap_iface_, getFactoryMacAddress);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_mac.first.code);
hidl_array<uint8_t, 6> all_zero{};
EXPECT_NE(all_zero, status_and_mac.second);
}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, WifiApIfaceHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/wifi/1.4/vts/functional/wifi_sta_iface_hidl_test.cpp b/wifi/1.4/vts/functional/wifi_sta_iface_hidl_test.cpp
new file mode 100644
index 0000000..ec4b2c9
--- /dev/null
+++ b/wifi/1.4/vts/functional/wifi_sta_iface_hidl_test.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <android/hardware/wifi/1.4/IWifiStaIface.h>
+
+#include <VtsHalHidlTargetTestBase.h>
+
+#include "wifi_hidl_call_util.h"
+#include "wifi_hidl_test_utils.h"
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::wifi::V1_0::WifiStatus;
+using ::android::hardware::wifi::V1_0::WifiStatusCode;
+using ::android::hardware::wifi::V1_4::IWifiStaIface;
+
+/**
+ * Fixture to use for all STA Iface HIDL interface tests.
+ */
+class WifiStaIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+ wifi_sta_iface_ = IWifiStaIface::castFrom(getWifiStaIface());
+ ASSERT_NE(nullptr, wifi_sta_iface_.get());
+ }
+
+ virtual void TearDown() override { stopWifi(); }
+
+ protected:
+ sp<IWifiStaIface> wifi_sta_iface_;
+};
+
+/*
+ * GetCapabilities_1_4
+ */
+TEST_F(WifiStaIfaceHidlTest, GetCapabilities_1_4) {
+ const auto& status_and_caps =
+ HIDL_INVOKE(wifi_sta_iface_, getCapabilities_1_4);
+ if (status_and_caps.first.code != WifiStatusCode::SUCCESS) {
+ EXPECT_EQ(WifiStatusCode::ERROR_NOT_SUPPORTED,
+ status_and_caps.first.code);
+ return;
+ }
+ EXPECT_NE(0u, status_and_caps.second);
+}
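
Unlike every other test file in this patch, this newly added 1.4 STA iface test still sits on the legacy VtsHalHidlTargetTestBase/TEST_F harness and the no-argument getWifiStaIface(); presumably it picks up the same TEST_P/INSTANTIATE_TEST_SUITE_P treatment sketched earlier once the utils migration tracked by b/143892896 lands.
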
diff --git a/wifi/supplicant/1.0/vts/functional/Android.bp b/wifi/supplicant/1.0/vts/functional/Android.bp
index ba79738..15525bb 100644
--- a/wifi/supplicant/1.0/vts/functional/Android.bp
+++ b/wifi/supplicant/1.0/vts/functional/Android.bp
@@ -51,7 +51,7 @@
"libwifi-system",
"libwifi-system-iface",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
cc_test {
@@ -71,4 +71,5 @@
"libwifi-system",
"libwifi-system-iface",
],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/wifi/supplicant/1.0/vts/functional/VtsHalWifiSupplicantV1_0TargetTest.cpp b/wifi/supplicant/1.0/vts/functional/VtsHalWifiSupplicantV1_0TargetTest.cpp
index 6ca0546..f582cc1 100644
--- a/wifi/supplicant/1.0/vts/functional/VtsHalWifiSupplicantV1_0TargetTest.cpp
+++ b/wifi/supplicant/1.0/vts/functional/VtsHalWifiSupplicantV1_0TargetTest.cpp
@@ -14,40 +14,8 @@
* limitations under the License.
*/
-#include <android-base/logging.h>
-
#include "supplicant_hidl_test_utils.h"
-#include "wifi_hidl_test_utils.h"
-class WifiSupplicantHidlEnvironment_1_0 : public WifiSupplicantHidlEnvironment {
- public:
- // get the test environment singleton
- static WifiSupplicantHidlEnvironment_1_0* Instance() {
- static WifiSupplicantHidlEnvironment_1_0* instance =
- new WifiSupplicantHidlEnvironment_1_0;
- return instance;
- }
- virtual void registerTestServices() override {
- registerTestService<::android::hardware::wifi::V1_0::IWifi>();
- registerTestService<
- ::android::hardware::wifi::supplicant::V1_0::ISupplicant>();
- }
-
- private:
- WifiSupplicantHidlEnvironment_1_0() {}
-};
-
-WifiSupplicantHidlEnvironment* gEnv =
- WifiSupplicantHidlEnvironment_1_0::Instance();
-
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(gEnv);
- ::testing::InitGoogleTest(&argc, argv);
- gEnv->init(&argc, argv);
- int status = gEnv->initFromOptions(argc, argv);
- if (status == 0) {
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- }
- return status;
-}
+// TODO(b/143892896): Remove this file after wifi_hidl_test_utils.cpp is
+// updated.
+WifiSupplicantHidlEnvironment* gEnv = nullptr;
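
The supplicant fixture below parameterizes over a pair of instance names — the Wi-Fi HAL instance and the supplicant instance — so its instantiation combines two ValuesIn generators. The INSTANTIATE call itself falls outside this excerpt; it plausibly looks like the following sketch:

INSTANTIATE_TEST_SUITE_P(
    PerInstance, SupplicantHidlTest,
    testing::Combine(
        testing::ValuesIn(
            android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
        testing::ValuesIn(android::hardware::getAllHalInstanceNames(
            ISupplicant::descriptor))),
    android::hardware::PrintInstanceTupleNameToString<>);
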
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_call_util.h b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_call_util.h
index 1c0fcec..3fa6f9d 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_call_util.h
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_call_util.h
@@ -25,8 +25,6 @@
#include <type_traits>
#include <utility>
-#include <VtsHalHidlTargetTestBase.h>
-
namespace {
namespace detail {
template <typename>
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test.cpp b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test.cpp
index 436b88b..4f25465 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test.cpp
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test.cpp
@@ -16,35 +16,47 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-
+#include <VtsCoreUtil.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/supplicant/1.0/ISupplicant.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "supplicant_hidl_test_utils.h"
using ::android::sp;
using ::android::hardware::hidl_vec;
+using ::android::hardware::wifi::supplicant::V1_0::IfaceType;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicant;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantIface;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
-using ::android::hardware::wifi::supplicant::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifi;
-extern WifiSupplicantHidlEnvironment* gEnv;
-
-class SupplicantHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SupplicantHidlTest
+ : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {
public:
virtual void SetUp() override {
- startSupplicantAndWaitForHidlService();
- supplicant_ = getSupplicant();
+ wifi_instance_name_ = std::get<0>(GetParam());
+ supplicant_instance_name_ = std::get<1>(GetParam());
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ isP2pOn_ =
+ testing::deviceSupportsFeature("android.hardware.wifi.direct");
+ supplicant_ = getSupplicant(supplicant_instance_name_, isP2pOn_);
ASSERT_NE(supplicant_.get(), nullptr);
}
- virtual void TearDown() override { stopSupplicant(); }
+ virtual void TearDown() override { stopSupplicant(wifi_instance_name_); }
protected:
// ISupplicant object used for all tests in this fixture.
sp<ISupplicant> supplicant_;
+ bool isP2pOn_ = false;
+ std::string wifi_instance_name_;
+ std::string supplicant_instance_name_;
};
/*
@@ -52,16 +64,19 @@
* Ensures that an instance of the ISupplicant proxy object is
* successfully created.
*/
-TEST(SupplicantHidlTestNoFixture, Create) {
- startSupplicantAndWaitForHidlService();
- EXPECT_NE(nullptr, getSupplicant().get());
- stopSupplicant();
+TEST_P(SupplicantHidlTest, Create) {
+    // Stop the supplicant started in SetUp so it can be restarted below.
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ EXPECT_NE(nullptr,
+ getSupplicant(supplicant_instance_name_, isP2pOn_).get());
}
/*
* ListInterfaces
*/
-TEST_F(SupplicantHidlTest, ListInterfaces) {
+TEST_P(SupplicantHidlTest, ListInterfaces) {
std::vector<ISupplicant::IfaceInfo> ifaces;
supplicant_->listInterfaces(
[&](const SupplicantStatus& status,
@@ -74,7 +89,7 @@
std::find_if(ifaces.begin(), ifaces.end(), [](const auto& iface) {
return iface.type == IfaceType::STA;
}));
- if (gEnv->isP2pOn) {
+ if (isP2pOn_) {
EXPECT_NE(
ifaces.end(),
std::find_if(ifaces.begin(), ifaces.end(), [](const auto& iface) {
@@ -86,7 +101,7 @@
/*
* GetInterface
*/
-TEST_F(SupplicantHidlTest, GetInterface) {
+TEST_P(SupplicantHidlTest, GetInterface) {
std::vector<ISupplicant::IfaceInfo> ifaces;
supplicant_->listInterfaces(
[&](const SupplicantStatus& status,
@@ -107,7 +122,7 @@
/*
* SetDebugParams
*/
-TEST_F(SupplicantHidlTest, SetDebugParams) {
+TEST_P(SupplicantHidlTest, SetDebugParams) {
bool show_timestamp = true;
bool show_keys = true;
ISupplicant::DebugLevel level = ISupplicant::DebugLevel::EXCESSIVE;
@@ -124,7 +139,7 @@
/*
* GetDebugLevel
*/
-TEST_F(SupplicantHidlTest, GetDebugLevel) {
+TEST_P(SupplicantHidlTest, GetDebugLevel) {
bool show_timestamp = true;
bool show_keys = true;
ISupplicant::DebugLevel level = ISupplicant::DebugLevel::EXCESSIVE;
@@ -142,7 +157,7 @@
/*
* IsDebugShowTimestampEnabled
*/
-TEST_F(SupplicantHidlTest, IsDebugShowTimestampEnabled) {
+TEST_P(SupplicantHidlTest, IsDebugShowTimestampEnabled) {
bool show_timestamp = true;
bool show_keys = true;
ISupplicant::DebugLevel level = ISupplicant::DebugLevel::EXCESSIVE;
@@ -160,7 +175,7 @@
/*
* IsDebugShowKeysEnabled
*/
-TEST_F(SupplicantHidlTest, IsDebugShowKeysEnabled) {
+TEST_P(SupplicantHidlTest, IsDebugShowKeysEnabled) {
bool show_timestamp = true;
bool show_keys = true;
ISupplicant::DebugLevel level = ISupplicant::DebugLevel::EXCESSIVE;
@@ -178,15 +193,24 @@
/*
 * SetConcurrencyPriority
*/
-TEST_F(SupplicantHidlTest, SetConcurrencyPriority) {
+TEST_P(SupplicantHidlTest, SetConcurrencyPriority) {
supplicant_->setConcurrencyPriority(
IfaceType::STA, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
- if (gEnv->isP2pOn) {
+ if (isP2pOn_) {
supplicant_->setConcurrencyPriority(
IfaceType::P2P, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
}
}
+
+INSTANTIATE_TEST_CASE_P(
+ PerInstance, SupplicantHidlTest,
+ testing::Combine(
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ISupplicant::descriptor))),
+ android::hardware::PrintInstanceTupleNameToString<>);
\ No newline at end of file
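
Note: the INSTANTIATE_TEST_CASE_P call above runs every TEST_P in this file once per (IWifi instance, ISupplicant instance) pair. A sketch of the parameter space it builds (instance names are illustrative; most devices register only "default"):

    // Both lists are queried from the hwservicemanager at instantiation time.
    auto wifi_names =
        android::hardware::getAllHalInstanceNames(IWifi::descriptor);
    auto supplicant_names =
        android::hardware::getAllHalInstanceNames(ISupplicant::descriptor);
    // testing::Combine crosses the two lists, so each test runs
    // wifi_names.size() * supplicant_names.size() times; test names are
    // derived from the sanitized instance names (e.g. ".../default_default").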
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.cpp b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.cpp
index 7bd04dc..d47e42f 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.cpp
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.cpp
@@ -56,8 +56,8 @@
// Helper function to initialize the driver and firmware to STA mode
// using the vendor HAL HIDL interface.
-void initilializeDriverAndFirmware() {
- sp<IWifiChip> wifi_chip = getWifiChip();
+void initilializeDriverAndFirmware(const std::string& wifi_instance_name) {
+ sp<IWifiChip> wifi_chip = getWifiChip(wifi_instance_name);
ChipModeId mode_id;
EXPECT_TRUE(configureChipToSupportIfaceType(
wifi_chip, ::android::hardware::wifi::V1_0::IfaceType::STA, &mode_id));
@@ -65,7 +65,9 @@
// Helper function to deinitialize the driver and firmware
// using the vendor HAL HIDL interface.
-void deInitilializeDriverAndFirmware() { stopWifi(); }
+void deInitilializeDriverAndFirmware(const std::string& wifi_instance_name) {
+ stopWifi(wifi_instance_name);
+}
// Helper function to find any iface of the desired type exposed.
bool findIfaceOfType(sp<ISupplicant> supplicant, IfaceType desired_type,
@@ -154,28 +156,38 @@
std::condition_variable condition_;
};
-void stopSupplicant() {
+void stopSupplicant() { stopSupplicant(""); }
+
+void stopSupplicant(const std::string& wifi_instance_name) {
SupplicantManager supplicant_manager;
ASSERT_TRUE(supplicant_manager.StopSupplicant());
- deInitilializeDriverAndFirmware();
+ deInitilializeDriverAndFirmware(wifi_instance_name);
ASSERT_FALSE(supplicant_manager.IsSupplicantRunning());
}
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
void startSupplicantAndWaitForHidlService() {
- initilializeDriverAndFirmware();
+ startSupplicantAndWaitForHidlService("",
+ gEnv->getServiceName<ISupplicant>());
+}
+
+void startSupplicantAndWaitForHidlService(
+ const std::string& wifi_instance_name,
+ const std::string& supplicant_instance_name) {
+ initilializeDriverAndFirmware(wifi_instance_name);
android::sp<ServiceNotificationListener> notification_listener =
new ServiceNotificationListener();
- string service_name = gEnv->getServiceName<ISupplicant>();
ASSERT_TRUE(notification_listener->registerForHidlServiceNotifications(
- service_name));
+ supplicant_instance_name));
SupplicantManager supplicant_manager;
ASSERT_TRUE(supplicant_manager.StartSupplicant());
ASSERT_TRUE(supplicant_manager.IsSupplicantRunning());
- ASSERT_TRUE(notification_listener->waitForHidlService(500, service_name));
+ ASSERT_TRUE(notification_listener->waitForHidlService(
+ 500, supplicant_instance_name));
}
bool is_1_1(const sp<ISupplicant>& supplicant) {
@@ -218,6 +230,7 @@
});
}
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
sp<ISupplicant> getSupplicant() {
sp<ISupplicant> supplicant =
::testing::VtsHalHidlTargetTestBase::getService<ISupplicant>(
@@ -232,8 +245,28 @@
return supplicant;
}
+sp<ISupplicant> getSupplicant(const std::string& supplicant_instance_name,
+ bool isP2pOn) {
+ sp<ISupplicant> supplicant =
+ ISupplicant::getService(supplicant_instance_name);
+ // For 1.1 supplicant, we need to add interfaces at initialization.
+ if (is_1_1(supplicant)) {
+ addSupplicantStaIface_1_1(supplicant);
+ if (isP2pOn) {
+ addSupplicantP2pIface_1_1(supplicant);
+ }
+ }
+ return supplicant;
+}
+
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
sp<ISupplicantStaIface> getSupplicantStaIface() {
sp<ISupplicant> supplicant = getSupplicant();
+ return getSupplicantStaIface(supplicant);
+}
+
+sp<ISupplicantStaIface> getSupplicantStaIface(
+ const sp<ISupplicant>& supplicant) {
if (!supplicant.get()) {
return nullptr;
}
@@ -257,8 +290,14 @@
return sta_iface;
}
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
sp<ISupplicantStaNetwork> createSupplicantStaNetwork() {
- sp<ISupplicantStaIface> sta_iface = getSupplicantStaIface();
+ return createSupplicantStaNetwork(getSupplicant());
+}
+
+sp<ISupplicantStaNetwork> createSupplicantStaNetwork(
+ const sp<ISupplicant>& supplicant) {
+ sp<ISupplicantStaIface> sta_iface = getSupplicantStaIface(supplicant);
if (!sta_iface.get()) {
return nullptr;
}
@@ -278,8 +317,13 @@
return sta_network;
}
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
sp<ISupplicantP2pIface> getSupplicantP2pIface() {
- sp<ISupplicant> supplicant = getSupplicant();
+ return getSupplicantP2pIface(getSupplicant());
+}
+
+sp<ISupplicantP2pIface> getSupplicantP2pIface(
+ const sp<ISupplicant>& supplicant) {
if (!supplicant.get()) {
return nullptr;
}
@@ -303,8 +347,12 @@
return p2p_iface;
}
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
bool turnOnExcessiveLogging() {
- sp<ISupplicant> supplicant = getSupplicant();
+ return turnOnExcessiveLogging(getSupplicant());
+}
+
+bool turnOnExcessiveLogging(const sp<ISupplicant>& supplicant) {
if (!supplicant.get()) {
return false;
}
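
Note: putting the new overloads together, a typical parameterized SetUp sequence looks like this sketch (the "default" names are illustrative; the real fixtures take them from GetParam()):

    const std::string wifi_instance = "default";        // assumption: default instance
    const std::string supplicant_instance = "default";  // assumption: default instance
    stopSupplicant(wifi_instance);                      // start from a clean slate
    startSupplicantAndWaitForHidlService(wifi_instance, supplicant_instance);
    bool is_p2p_on =
        testing::deviceSupportsFeature("android.hardware.wifi.direct");
    sp<ISupplicant> supplicant = getSupplicant(supplicant_instance, is_p2p_on);
    sp<ISupplicantStaIface> sta_iface = getSupplicantStaIface(supplicant);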
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.h b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.h
index 21a1ae6..40ad695 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.h
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_hidl_test_utils.h
@@ -25,21 +25,47 @@
#include <getopt.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include "wifi_hidl_test_utils.h"
// Used to stop the android wifi framework before every test.
void stopWifiFramework();
+void stopWifiFramework(const std::string& wifi_instance_name);
void startWifiFramework();
+void startWifiFramework(const std::string& wifi_instance_name);
+
void stopSupplicant();
+void stopSupplicant(const std::string& wifi_instance_name);
// Used to configure the chip, driver and start wpa_supplicant before every
// test.
-void startSupplicantAndWaitForHidlService();
+void startSupplicantAndWaitForHidlService(
+ const std::string& wifi_instance_name,
+ const std::string& supplicant_instance_name);
// Helper functions to obtain references to the various HIDL interface objects.
// Note: We only have a single instance of each of these objects currently.
// These helper functions should be modified to return vectors if we support
// multiple instances.
android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicant>
+getSupplicant(const std::string& supplicant_instance_name, bool isP2pOn);
+android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicantStaIface>
+getSupplicantStaIface(
+ const android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicant>&
+ supplicant);
+android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicantStaNetwork>
+createSupplicantStaNetwork(
+ const android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicant>&
+ supplicant);
+android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicantP2pIface>
+getSupplicantP2pIface(
+ const android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicant>&
+ supplicant);
+bool turnOnExcessiveLogging(
+ const android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicant>&
+ supplicant);
+
+// TODO(b/143892896): Remove old APIs after all supplicant tests are updated.
+void startSupplicantAndWaitForHidlService();
+android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicant>
getSupplicant();
android::sp<android::hardware::wifi::supplicant::V1_0::ISupplicantStaIface>
getSupplicantStaIface();
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_p2p_iface_hidl_test.cpp b/wifi/supplicant/1.0/vts/functional/supplicant_p2p_iface_hidl_test.cpp
index 0181f7b..8d6f38d 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_p2p_iface_hidl_test.cpp
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_p2p_iface_hidl_test.cpp
@@ -15,9 +15,12 @@
*/
#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
-#include <VtsHalHidlTargetTestBase.h>
-
+#include <VtsCoreUtil.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/supplicant/1.0/ISupplicantP2pIface.h>
#include "supplicant_hidl_call_util.h"
@@ -30,11 +33,13 @@
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::wifi::supplicant::V1_0::IfaceType;
+using ::android::hardware::wifi::supplicant::V1_0::ISupplicant;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantP2pIface;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantP2pIfaceCallback;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantNetworkId;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
+using ::android::hardware::wifi::V1_0::IWifi;
namespace {
constexpr uint8_t kTestSsidPostfix[] = {'t', 'e', 's', 't'};
@@ -66,26 +71,38 @@
constexpr SupplicantNetworkId kTestNetworkId = 5;
} // namespace
-class SupplicantP2pIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SupplicantP2pIfaceHidlTest
+ : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {
public:
virtual void SetUp() override {
- startSupplicantAndWaitForHidlService();
- EXPECT_TRUE(turnOnExcessiveLogging());
- p2p_iface_ = getSupplicantP2pIface();
+ wifi_instance_name_ = std::get<0>(GetParam());
+ supplicant_instance_name_ = std::get<1>(GetParam());
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ isP2pOn_ =
+ testing::deviceSupportsFeature("android.hardware.wifi.direct");
+ supplicant_ = getSupplicant(supplicant_instance_name_, isP2pOn_);
+ EXPECT_TRUE(turnOnExcessiveLogging(supplicant_));
+ p2p_iface_ = getSupplicantP2pIface(supplicant_);
ASSERT_NE(p2p_iface_.get(), nullptr);
memcpy(mac_addr_.data(), kTestMacAddr, mac_addr_.size());
memcpy(peer_mac_addr_.data(), kTestPeerMacAddr, peer_mac_addr_.size());
}
- virtual void TearDown() override { stopSupplicant(); }
+ virtual void TearDown() override { stopSupplicant(wifi_instance_name_); }
protected:
+ bool isP2pOn_ = false;
+ sp<ISupplicant> supplicant_;
// ISupplicantP2pIface object used for all tests in this fixture.
sp<ISupplicantP2pIface> p2p_iface_;
// MAC address to use for various tests.
std::array<uint8_t, 6> mac_addr_;
std::array<uint8_t, 6> peer_mac_addr_;
+ std::string wifi_instance_name_;
+ std::string supplicant_instance_name_;
};
class IfaceCallback : public ISupplicantP2pIfaceCallback {
@@ -177,16 +194,20 @@
* Ensures that an instance of the ISupplicantP2pIface proxy object is
* successfully created.
*/
-TEST(SupplicantP2pIfaceHidlTestNoFixture, Create) {
- startSupplicantAndWaitForHidlService();
- EXPECT_NE(nullptr, getSupplicantP2pIface().get());
- stopSupplicant();
+TEST_P(SupplicantP2pIfaceHidlTest, Create) {
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ sp<ISupplicantP2pIface> p2p_iface = getSupplicantP2pIface(
+ getSupplicant(supplicant_instance_name_, isP2pOn_));
+
+ EXPECT_NE(nullptr, p2p_iface.get());
}
/*
* RegisterCallback
*/
-TEST_F(SupplicantP2pIfaceHidlTest, RegisterCallback) {
+TEST_P(SupplicantP2pIfaceHidlTest, RegisterCallback) {
p2p_iface_->registerCallback(
new IfaceCallback(), [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -196,7 +217,7 @@
/*
* GetName
*/
-TEST_F(SupplicantP2pIfaceHidlTest, GetName) {
+TEST_P(SupplicantP2pIfaceHidlTest, GetName) {
const auto& status_and_interface_name = HIDL_INVOKE(p2p_iface_, getName);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
status_and_interface_name.first.code);
@@ -206,7 +227,7 @@
/*
* GetType
*/
-TEST_F(SupplicantP2pIfaceHidlTest, GetType) {
+TEST_P(SupplicantP2pIfaceHidlTest, GetType) {
const auto& status_and_interface_type = HIDL_INVOKE(p2p_iface_, getType);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
status_and_interface_type.first.code);
@@ -216,7 +237,7 @@
/*
* GetDeviceAddress
*/
-TEST_F(SupplicantP2pIfaceHidlTest, GetDeviceAddress) {
+TEST_P(SupplicantP2pIfaceHidlTest, GetDeviceAddress) {
p2p_iface_->getDeviceAddress(
[](const SupplicantStatus& status,
const hidl_array<uint8_t, 6>& /* mac_addr */) {
@@ -227,7 +248,7 @@
/*
* SetSsidPostfix
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetSsidPostfix) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetSsidPostfix) {
std::vector<uint8_t> ssid(kTestSsidPostfix,
kTestSsidPostfix + sizeof(kTestSsidPostfix));
p2p_iface_->setSsidPostfix(ssid, [](const SupplicantStatus& status) {
@@ -238,7 +259,7 @@
/*
* Find
*/
-TEST_F(SupplicantP2pIfaceHidlTest, Find) {
+TEST_P(SupplicantP2pIfaceHidlTest, Find) {
p2p_iface_->find(kTestFindTimeout, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -247,7 +268,7 @@
/*
* StopFind
*/
-TEST_F(SupplicantP2pIfaceHidlTest, StopFind) {
+TEST_P(SupplicantP2pIfaceHidlTest, StopFind) {
p2p_iface_->find(kTestFindTimeout, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -260,7 +281,7 @@
/*
* Flush
*/
-TEST_F(SupplicantP2pIfaceHidlTest, Flush) {
+TEST_P(SupplicantP2pIfaceHidlTest, Flush) {
p2p_iface_->flush([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -269,7 +290,7 @@
/*
* Connect
*/
-TEST_F(SupplicantP2pIfaceHidlTest, Connect) {
+TEST_P(SupplicantP2pIfaceHidlTest, Connect) {
p2p_iface_->connect(
mac_addr_, ISupplicantP2pIface::WpsProvisionMethod::PBC,
kTestConnectPin, false, false, kTestConnectGoIntent,
@@ -282,7 +303,7 @@
/*
* CancelConnect
*/
-TEST_F(SupplicantP2pIfaceHidlTest, CancelConnect) {
+TEST_P(SupplicantP2pIfaceHidlTest, CancelConnect) {
p2p_iface_->connect(
mac_addr_, ISupplicantP2pIface::WpsProvisionMethod::PBC,
kTestConnectPin, false, false, kTestConnectGoIntent,
@@ -299,7 +320,7 @@
/*
* ProvisionDiscovery
*/
-TEST_F(SupplicantP2pIfaceHidlTest, ProvisionDiscovery) {
+TEST_P(SupplicantP2pIfaceHidlTest, ProvisionDiscovery) {
p2p_iface_->provisionDiscovery(
mac_addr_, ISupplicantP2pIface::WpsProvisionMethod::PBC,
[](const SupplicantStatus& status) {
@@ -311,7 +332,7 @@
/*
* AddGroup
*/
-TEST_F(SupplicantP2pIfaceHidlTest, AddGroup) {
+TEST_P(SupplicantP2pIfaceHidlTest, AddGroup) {
p2p_iface_->addGroup(false, kTestNetworkId,
[](const SupplicantStatus& /* status */) {
// TODO: Figure out the initialization sequence for
@@ -324,7 +345,7 @@
/*
* RemoveGroup
*/
-TEST_F(SupplicantP2pIfaceHidlTest, RemoveGroup) {
+TEST_P(SupplicantP2pIfaceHidlTest, RemoveGroup) {
// This is not going to work with fake values.
EXPECT_NE(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, removeGroup, kTestGroupIfName).code);
@@ -333,7 +354,7 @@
/*
* Reject
*/
-TEST_F(SupplicantP2pIfaceHidlTest, Reject) {
+TEST_P(SupplicantP2pIfaceHidlTest, Reject) {
p2p_iface_->reject(mac_addr_, [](const SupplicantStatus& status) {
// This is not going to work with fake values.
EXPECT_EQ(SupplicantStatusCode::FAILURE_UNKNOWN, status.code);
@@ -343,7 +364,7 @@
/*
* Invite
*/
-TEST_F(SupplicantP2pIfaceHidlTest, Invite) {
+TEST_P(SupplicantP2pIfaceHidlTest, Invite) {
p2p_iface_->invite(kTestGroupIfName, mac_addr_, peer_mac_addr_,
[](const SupplicantStatus& status) {
// This is not going to work with fake values.
@@ -355,7 +376,7 @@
/*
* Reinvoke
*/
-TEST_F(SupplicantP2pIfaceHidlTest, Reinvoke) {
+TEST_P(SupplicantP2pIfaceHidlTest, Reinvoke) {
p2p_iface_->reinvoke(
kTestNetworkId, mac_addr_, [](const SupplicantStatus& status) {
// This is not going to work with fake values.
@@ -367,7 +388,7 @@
/*
* ConfigureExtListen
*/
-TEST_F(SupplicantP2pIfaceHidlTest, ConfigureExtListen) {
+TEST_P(SupplicantP2pIfaceHidlTest, ConfigureExtListen) {
p2p_iface_->configureExtListen(kTestExtListenPeriod, kTestExtListenInterval,
[](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -378,7 +399,7 @@
/*
* SetListenChannel
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetListenChannel) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetListenChannel) {
p2p_iface_->setListenChannel(
kTestChannel, kTestOperatingClass, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -388,7 +409,7 @@
/*
* SetDisallowedFrequencies
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetDisallowedFrequencies) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetDisallowedFrequencies) {
std::vector<ISupplicantP2pIface::FreqRange> ranges = {
{kTestFreqRange[0], kTestFreqRange[1]}};
p2p_iface_->setDisallowedFrequencies(
@@ -400,7 +421,7 @@
/*
* GetSsid
*/
-TEST_F(SupplicantP2pIfaceHidlTest, GetSsid) {
+TEST_P(SupplicantP2pIfaceHidlTest, GetSsid) {
std::array<uint8_t, 6> mac_addr;
memcpy(mac_addr.data(), kTestMacAddr, mac_addr.size());
p2p_iface_->getSsid(mac_addr, [](const SupplicantStatus& status,
@@ -413,7 +434,7 @@
/*
* GetGroupCapability
*/
-TEST_F(SupplicantP2pIfaceHidlTest, GetGroupCapability) {
+TEST_P(SupplicantP2pIfaceHidlTest, GetGroupCapability) {
std::array<uint8_t, 6> mac_addr;
memcpy(mac_addr.data(), kTestMacAddr, mac_addr.size());
p2p_iface_->getGroupCapability(
@@ -426,7 +447,7 @@
/*
* FlushServices
*/
-TEST_F(SupplicantP2pIfaceHidlTest, FlushServices) {
+TEST_P(SupplicantP2pIfaceHidlTest, FlushServices) {
p2p_iface_->flushServices([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -435,7 +456,7 @@
/*
* SetMiracastMode
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetMiracastMode) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetMiracastMode) {
p2p_iface_->setMiracastMode(ISupplicantP2pIface::MiracastMode::DISABLED,
[](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -456,7 +477,7 @@
/*
* SetGroupIdle
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetGroupIdle) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetGroupIdle) {
// This is not going to work with fake values.
EXPECT_NE(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setGroupIdle, kTestGroupIfName,
@@ -467,7 +488,7 @@
/*
* SetPowerSave
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetPowerSave) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetPowerSave) {
// This is not going to work with fake values.
EXPECT_NE(
SupplicantStatusCode::SUCCESS,
@@ -481,7 +502,7 @@
/*
* SetWpsDeviceName
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsDeviceName) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsDeviceName) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsDeviceName, kTestWpsDeviceName).code);
@@ -490,7 +511,7 @@
/*
* SetWpsDeviceType
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsDeviceType) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsDeviceType) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsDeviceType, kTestWpsDeviceType).code);
@@ -499,7 +520,7 @@
/*
* SetWpsManufacturer
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsManufacturer) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsManufacturer) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsManufacturer, kTestWpsManufacturer).code);
@@ -508,7 +529,7 @@
/*
* SetWpsModelName
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsModelName) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsModelName) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsModelName, kTestWpsModelName).code);
}
@@ -516,7 +537,7 @@
/*
* SetWpsModelNumber
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsModelNumber) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsModelNumber) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsModelNumber, kTestWpsModelNumber).code);
@@ -525,7 +546,7 @@
/*
* SetWpsSerialNumber
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsSerialNumber) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsSerialNumber) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsSerialNumber, kTestWpsSerialNumber).code);
@@ -534,7 +555,7 @@
/*
* SetWpsConfigMethods
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWpsConfigMethods) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWpsConfigMethods) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWpsConfigMethods, kTestWpsConfigMethods)
@@ -548,7 +569,7 @@
 * This also tests that removeBonjourService() returns an error when there is no
* existing bonjour service with the same query data.
*/
-TEST_F(SupplicantP2pIfaceHidlTest, AddAndRemoveBonjourService) {
+TEST_P(SupplicantP2pIfaceHidlTest, AddAndRemoveBonjourService) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(
p2p_iface_, addBonjourService,
@@ -584,7 +605,7 @@
 * This also tests that removeUpnpService() returns an error when there is no
 * existing upnp service with the same service name.
*/
-TEST_F(SupplicantP2pIfaceHidlTest, AddAndRemoveUpnpService) {
+TEST_P(SupplicantP2pIfaceHidlTest, AddAndRemoveUpnpService) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, addUpnpService, 0 /* version */,
kTestUpnpServiceName)
@@ -604,7 +625,7 @@
/*
* EnableWfd
*/
-TEST_F(SupplicantP2pIfaceHidlTest, EnableWfd) {
+TEST_P(SupplicantP2pIfaceHidlTest, EnableWfd) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, enableWfd, true).code);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -614,8 +635,17 @@
/*
* SetWfdDeviceInfo
*/
-TEST_F(SupplicantP2pIfaceHidlTest, SetWfdDeviceInfo) {
+TEST_P(SupplicantP2pIfaceHidlTest, SetWfdDeviceInfo) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(p2p_iface_, setWfdDeviceInfo, kTestWfdDeviceInfo).code);
}
+
+INSTANTIATE_TEST_CASE_P(
+ PerInstance, SupplicantP2pIfaceHidlTest,
+ testing::Combine(
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ISupplicant::descriptor))),
+ android::hardware::PrintInstanceTupleNameToString<>);
\ No newline at end of file
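
Note: P2P-specific setup in this fixture is gated on the Wi-Fi Direct system feature rather than skipping the suite. If a hard skip were ever preferred, a sketch using standard gtest facilities (not part of this change) would be:

    if (!testing::deviceSupportsFeature("android.hardware.wifi.direct")) {
        GTEST_SKIP() << "android.hardware.wifi.direct not supported";
    }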
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_sta_iface_hidl_test.cpp b/wifi/supplicant/1.0/vts/functional/supplicant_sta_iface_hidl_test.cpp
index ec102d5..089b3cd 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_sta_iface_hidl_test.cpp
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_sta_iface_hidl_test.cpp
@@ -15,9 +15,12 @@
*/
#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
-#include <VtsHalHidlTargetTestBase.h>
-
+#include <VtsCoreUtil.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/supplicant/1.0/ISupplicantStaIface.h>
#include "supplicant_hidl_call_util.h"
@@ -30,12 +33,14 @@
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::wifi::supplicant::V1_0::IfaceType;
+using ::android::hardware::wifi::supplicant::V1_0::ISupplicant;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantStaIface;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantStaIfaceCallback;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantStaNetwork;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantNetworkId;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
+using ::android::hardware::wifi::V1_0::IWifi;
namespace {
constexpr uint8_t kTestMacAddr[] = {0x56, 0x67, 0x67, 0xf4, 0x56, 0x92};
@@ -61,24 +66,36 @@
constexpr uint16_t kTestWpsConfigMethods = 0xffff;
} // namespace
-class SupplicantStaIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SupplicantStaIfaceHidlTest
+ : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {
public:
virtual void SetUp() override {
- startSupplicantAndWaitForHidlService();
- EXPECT_TRUE(turnOnExcessiveLogging());
- sta_iface_ = getSupplicantStaIface();
+ wifi_instance_name_ = std::get<0>(GetParam());
+ supplicant_instance_name_ = std::get<1>(GetParam());
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ isP2pOn_ =
+ testing::deviceSupportsFeature("android.hardware.wifi.direct");
+ supplicant_ = getSupplicant(supplicant_instance_name_, isP2pOn_);
+ EXPECT_TRUE(turnOnExcessiveLogging(supplicant_));
+ sta_iface_ = getSupplicantStaIface(supplicant_);
ASSERT_NE(sta_iface_.get(), nullptr);
memcpy(mac_addr_.data(), kTestMacAddr, mac_addr_.size());
}
- virtual void TearDown() override { stopSupplicant(); }
+ virtual void TearDown() override { stopSupplicant(wifi_instance_name_); }
protected:
+ bool isP2pOn_ = false;
+ sp<ISupplicant> supplicant_;
// ISupplicantStaIface object used for all tests in this fixture.
sp<ISupplicantStaIface> sta_iface_;
// MAC address to use for various tests.
std::array<uint8_t, 6> mac_addr_;
+ std::string wifi_instance_name_;
+ std::string supplicant_instance_name_;
};
class IfaceCallback : public ISupplicantStaIfaceCallback {
@@ -159,16 +176,19 @@
* Ensures that an instance of the ISupplicantStaIface proxy object is
* successfully created.
*/
-TEST(SupplicantStaIfaceHidlTestNoFixture, Create) {
- startSupplicantAndWaitForHidlService();
- EXPECT_NE(nullptr, getSupplicantStaIface().get());
- stopSupplicant();
+TEST_P(SupplicantStaIfaceHidlTest, Create) {
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ EXPECT_NE(nullptr, getSupplicantStaIface(
+ getSupplicant(supplicant_instance_name_, isP2pOn_))
+ .get());
}
/*
* RegisterCallback
*/
-TEST_F(SupplicantStaIfaceHidlTest, RegisterCallback) {
+TEST_P(SupplicantStaIfaceHidlTest, RegisterCallback) {
sta_iface_->registerCallback(
new IfaceCallback(), [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -178,7 +198,7 @@
/*
* GetName
*/
-TEST_F(SupplicantStaIfaceHidlTest, GetName) {
+TEST_P(SupplicantStaIfaceHidlTest, GetName) {
const auto& status_and_interface_name = HIDL_INVOKE(sta_iface_, getName);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
status_and_interface_name.first.code);
@@ -188,7 +208,7 @@
/*
* GetType
*/
-TEST_F(SupplicantStaIfaceHidlTest, GetType) {
+TEST_P(SupplicantStaIfaceHidlTest, GetType) {
const auto& status_and_interface_type = HIDL_INVOKE(sta_iface_, getType);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
status_and_interface_type.first.code);
@@ -198,14 +218,15 @@
/*
* listNetworks.
*/
-TEST_F(SupplicantStaIfaceHidlTest, listNetworks) {
+TEST_P(SupplicantStaIfaceHidlTest, listNetworks) {
sta_iface_->listNetworks([](const SupplicantStatus& status,
const hidl_vec<SupplicantNetworkId>& ids) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
EXPECT_EQ(0u, ids.size());
});
- sp<ISupplicantStaNetwork> sta_network = createSupplicantStaNetwork();
+ sp<ISupplicantStaNetwork> sta_network =
+ createSupplicantStaNetwork(supplicant_);
EXPECT_NE(nullptr, sta_network.get());
sta_iface_->listNetworks([](const SupplicantStatus& status,
@@ -218,7 +239,7 @@
/*
* Reassociate.
*/
-TEST_F(SupplicantStaIfaceHidlTest, Reassociate) {
+TEST_P(SupplicantStaIfaceHidlTest, Reassociate) {
sta_iface_->reassociate([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -227,7 +248,7 @@
/*
* Reconnect.
*/
-TEST_F(SupplicantStaIfaceHidlTest, Reconnect) {
+TEST_P(SupplicantStaIfaceHidlTest, Reconnect) {
sta_iface_->reconnect([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::FAILURE_IFACE_NOT_DISCONNECTED,
status.code);
@@ -237,7 +258,7 @@
/*
* Disconnect.
*/
-TEST_F(SupplicantStaIfaceHidlTest, Disconnect) {
+TEST_P(SupplicantStaIfaceHidlTest, Disconnect) {
sta_iface_->disconnect([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -246,7 +267,7 @@
/*
* SetPowerSave.
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetPowerSave) {
+TEST_P(SupplicantStaIfaceHidlTest, SetPowerSave) {
sta_iface_->setPowerSave(true, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -258,7 +279,7 @@
/*
* InitiateTdlsDiscover.
*/
-TEST_F(SupplicantStaIfaceHidlTest, InitiateTdlsDiscover) {
+TEST_P(SupplicantStaIfaceHidlTest, InitiateTdlsDiscover) {
sta_iface_->initiateTdlsDiscover(
mac_addr_, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -268,7 +289,7 @@
/*
* InitiateTdlsSetup.
*/
-TEST_F(SupplicantStaIfaceHidlTest, InitiateTdlsSetup) {
+TEST_P(SupplicantStaIfaceHidlTest, InitiateTdlsSetup) {
sta_iface_->initiateTdlsSetup(
mac_addr_, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -278,7 +299,7 @@
/*
* InitiateTdlsTeardown.
*/
-TEST_F(SupplicantStaIfaceHidlTest, InitiateTdlsTeardown) {
+TEST_P(SupplicantStaIfaceHidlTest, InitiateTdlsTeardown) {
sta_iface_->initiateTdlsTeardown(
mac_addr_, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -288,7 +309,7 @@
/*
* InitiateAnqpQuery.
*/
-TEST_F(SupplicantStaIfaceHidlTest, InitiateAnqpQuery) {
+TEST_P(SupplicantStaIfaceHidlTest, InitiateAnqpQuery) {
std::vector<ISupplicantStaIface::AnqpInfoId> anqp_ids(
kTestAnqpInfoIds, kTestAnqpInfoIds + sizeof(kTestAnqpInfoIds));
std::vector<ISupplicantStaIface::Hs20AnqpSubtypes> hs_types(
@@ -304,7 +325,7 @@
/*
* InitiateHs20IconQuery.
*/
-TEST_F(SupplicantStaIfaceHidlTest, InitiateHs20IconQuery) {
+TEST_P(SupplicantStaIfaceHidlTest, InitiateHs20IconQuery) {
sta_iface_->initiateHs20IconQuery(
mac_addr_, kTestHs20IconFile, [](const SupplicantStatus& status) {
// These requests will fail unless the BSSID mentioned is actually
@@ -316,7 +337,7 @@
/*
* GetMacAddress.
*/
-TEST_F(SupplicantStaIfaceHidlTest, GetMacAddress) {
+TEST_P(SupplicantStaIfaceHidlTest, GetMacAddress) {
sta_iface_->getMacAddress([](const SupplicantStatus& status,
const hidl_array<uint8_t, 6>& mac_addr) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -328,7 +349,7 @@
/*
* StartRxFilter.
*/
-TEST_F(SupplicantStaIfaceHidlTest, StartRxFilter) {
+TEST_P(SupplicantStaIfaceHidlTest, StartRxFilter) {
sta_iface_->startRxFilter([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -337,7 +358,7 @@
/*
* StopRxFilter.
*/
-TEST_F(SupplicantStaIfaceHidlTest, StopRxFilter) {
+TEST_P(SupplicantStaIfaceHidlTest, StopRxFilter) {
sta_iface_->stopRxFilter([](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -346,7 +367,7 @@
/*
* AddRxFilter.
*/
-TEST_F(SupplicantStaIfaceHidlTest, AddRxFilter) {
+TEST_P(SupplicantStaIfaceHidlTest, AddRxFilter) {
sta_iface_->addRxFilter(ISupplicantStaIface::RxFilterType::V4_MULTICAST,
[](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -362,7 +383,7 @@
/*
* RemoveRxFilter.
*/
-TEST_F(SupplicantStaIfaceHidlTest, RemoveRxFilter) {
+TEST_P(SupplicantStaIfaceHidlTest, RemoveRxFilter) {
sta_iface_->removeRxFilter(ISupplicantStaIface::RxFilterType::V4_MULTICAST,
[](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -378,7 +399,7 @@
/*
* SetBtCoexistenceMode.
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetBtCoexistenceMode) {
+TEST_P(SupplicantStaIfaceHidlTest, SetBtCoexistenceMode) {
sta_iface_->setBtCoexistenceMode(
ISupplicantStaIface::BtCoexistenceMode::ENABLED,
[](const SupplicantStatus& status) {
@@ -399,7 +420,7 @@
/*
* SetBtCoexistenceScanModeEnabled.
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetBtCoexistenceScanModeEnabled) {
+TEST_P(SupplicantStaIfaceHidlTest, SetBtCoexistenceScanModeEnabled) {
sta_iface_->setBtCoexistenceScanModeEnabled(
true, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -413,7 +434,7 @@
/*
* SetSuspendModeEnabled.
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetSuspendModeEnabled) {
+TEST_P(SupplicantStaIfaceHidlTest, SetSuspendModeEnabled) {
sta_iface_->setSuspendModeEnabled(true, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -426,7 +447,7 @@
/*
* SetCountryCode.
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetCountryCode) {
+TEST_P(SupplicantStaIfaceHidlTest, SetCountryCode) {
sta_iface_->setCountryCode(
kTestCountryCode, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -436,7 +457,7 @@
/*
* SetWpsDeviceName
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsDeviceName) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsDeviceName) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsDeviceName, kTestWpsDeviceName).code);
@@ -445,7 +466,7 @@
/*
* SetWpsDeviceType
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsDeviceType) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsDeviceType) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsDeviceType, kTestWpsDeviceType).code);
@@ -454,7 +475,7 @@
/*
* SetWpsManufacturer
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsManufacturer) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsManufacturer) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsManufacturer, kTestWpsManufacturer).code);
@@ -463,7 +484,7 @@
/*
* SetWpsModelName
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsModelName) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsModelName) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsModelName, kTestWpsModelName).code);
}
@@ -471,7 +492,7 @@
/*
* SetWpsModelNumber
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsModelNumber) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsModelNumber) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsModelNumber, kTestWpsModelNumber).code);
@@ -480,7 +501,7 @@
/*
* SetWpsSerialNumber
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsSerialNumber) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsSerialNumber) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsSerialNumber, kTestWpsSerialNumber).code);
@@ -489,7 +510,7 @@
/*
* SetWpsConfigMethods
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetWpsConfigMethods) {
+TEST_P(SupplicantStaIfaceHidlTest, SetWpsConfigMethods) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setWpsConfigMethods, kTestWpsConfigMethods)
@@ -499,7 +520,7 @@
/*
* SetExternalSim
*/
-TEST_F(SupplicantStaIfaceHidlTest, SetExternalSim) {
+TEST_P(SupplicantStaIfaceHidlTest, SetExternalSim) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, setExternalSim, true).code);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -509,7 +530,7 @@
/*
* AddExtRadioWork
*/
-TEST_F(SupplicantStaIfaceHidlTest, AddExtRadioWork) {
+TEST_P(SupplicantStaIfaceHidlTest, AddExtRadioWork) {
const auto& status_and_radio_work_id =
HIDL_INVOKE(sta_iface_, addExtRadioWork, kTestRadioWorkName,
kTestRadioWorkFrequency, kTestRadioWorkTimeout);
@@ -524,9 +545,18 @@
/*
* RemoveExtRadioWork
*/
-TEST_F(SupplicantStaIfaceHidlTest, RemoveExtRadioWork) {
+TEST_P(SupplicantStaIfaceHidlTest, RemoveExtRadioWork) {
    // This fails because there is no ongoing radio work with kTestRadioWorkId.
EXPECT_NE(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_iface_, removeExtRadioWork, kTestRadioWorkId).code);
}
+
+INSTANTIATE_TEST_CASE_P(
+ PerInstance, SupplicantStaIfaceHidlTest,
+ testing::Combine(
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ISupplicant::descriptor))),
+ android::hardware::PrintInstanceTupleNameToString<>);
\ No newline at end of file
diff --git a/wifi/supplicant/1.0/vts/functional/supplicant_sta_network_hidl_test.cpp b/wifi/supplicant/1.0/vts/functional/supplicant_sta_network_hidl_test.cpp
index 832dd41..52f77a1 100644
--- a/wifi/supplicant/1.0/vts/functional/supplicant_sta_network_hidl_test.cpp
+++ b/wifi/supplicant/1.0/vts/functional/supplicant_sta_network_hidl_test.cpp
@@ -16,14 +16,16 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-
+#include <VtsCoreUtil.h>
+#include <android/hardware/wifi/1.0/IWifi.h>
#include <android/hardware/wifi/supplicant/1.0/ISupplicantStaNetwork.h>
-
-#include <android/hardware/wifi/supplicant/1.0/ISupplicantStaNetwork.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "supplicant_hidl_call_util.h"
#include "supplicant_hidl_test_utils.h"
+#include "wifi_hidl_test_utils.h"
using ::android::sp;
using ::android::hardware::hidl_array;
@@ -32,12 +34,14 @@
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::wifi::supplicant::V1_0::IfaceType;
+using ::android::hardware::wifi::supplicant::V1_0::ISupplicant;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantStaIface;
using ::android::hardware::wifi::supplicant::V1_0::ISupplicantStaNetwork;
using ::android::hardware::wifi::supplicant::V1_0::
ISupplicantStaNetworkCallback;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
+using ::android::hardware::wifi::V1_0::IWifi;
namespace {
constexpr char kTestSsidStr[] = "TestSsid1234";
@@ -74,37 +78,50 @@
ISupplicantStaNetwork::PairwiseCipherMask::TKIP);
} // namespace
-class SupplicantStaNetworkHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class SupplicantStaNetworkHidlTest
+ : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {
public:
virtual void SetUp() override {
- startSupplicantAndWaitForHidlService();
- EXPECT_TRUE(turnOnExcessiveLogging());
- sta_network_ = createSupplicantStaNetwork();
+ wifi_instance_name_ = std::get<0>(GetParam());
+ supplicant_instance_name_ = std::get<1>(GetParam());
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ isP2pOn_ =
+ testing::deviceSupportsFeature("android.hardware.wifi.direct");
+ supplicant_ = getSupplicant(supplicant_instance_name_, isP2pOn_);
+ EXPECT_TRUE(turnOnExcessiveLogging(supplicant_));
+ sta_network_ = createSupplicantStaNetwork(supplicant_);
ASSERT_NE(sta_network_.get(), nullptr);
ssid_.assign(kTestSsidStr, kTestSsidStr + strlen(kTestSsidStr));
}
- virtual void TearDown() override { stopSupplicant(); }
+ virtual void TearDown() override { stopSupplicant(wifi_instance_name_); }
protected:
void removeNetwork() {
- sp<ISupplicantStaIface> sta_iface = getSupplicantStaIface();
- ASSERT_NE(nullptr, sta_iface.get());
- uint32_t net_id;
- sta_network_->getId([&](const SupplicantStatus& status, int network_id) {
- ASSERT_EQ(SupplicantStatusCode::SUCCESS, status.code);
- net_id = network_id;
- });
- sta_iface->removeNetwork(net_id, [](const SupplicantStatus& status) {
- ASSERT_EQ(SupplicantStatusCode::SUCCESS, status.code);
- });
+ sp<ISupplicantStaIface> sta_iface = getSupplicantStaIface(supplicant_);
+ ASSERT_NE(nullptr, sta_iface.get());
+ uint32_t net_id;
+ sta_network_->getId(
+ [&](const SupplicantStatus& status, int network_id) {
+ ASSERT_EQ(SupplicantStatusCode::SUCCESS, status.code);
+ net_id = network_id;
+ });
+ sta_iface->removeNetwork(net_id, [](const SupplicantStatus& status) {
+ ASSERT_EQ(SupplicantStatusCode::SUCCESS, status.code);
+ });
}
+ bool isP2pOn_ = false;
+ sp<ISupplicant> supplicant_;
// ISupplicantStaNetwork object used for all tests in this fixture.
sp<ISupplicantStaNetwork> sta_network_;
// SSID to use for various tests.
std::vector<uint8_t> ssid_;
+ std::string wifi_instance_name_;
+ std::string supplicant_instance_name_;
};
class NetworkCallback : public ISupplicantStaNetworkCallback {
@@ -126,16 +143,20 @@
* Ensures that an instance of the ISupplicantStaNetwork proxy object is
* successfully created.
*/
-TEST(SupplicantStaNetworkHidlTestNoFixture, Create) {
- startSupplicantAndWaitForHidlService();
- EXPECT_NE(nullptr, createSupplicantStaNetwork().get());
- stopSupplicant();
+TEST_P(SupplicantStaNetworkHidlTest, Create) {
+ stopSupplicant(wifi_instance_name_);
+ startSupplicantAndWaitForHidlService(wifi_instance_name_,
+ supplicant_instance_name_);
+ sp<ISupplicant> supplicant =
+ getSupplicant(supplicant_instance_name_, isP2pOn_);
+ EXPECT_TRUE(turnOnExcessiveLogging(supplicant));
+ EXPECT_NE(nullptr, createSupplicantStaNetwork(supplicant).get());
}
/*
* RegisterCallback
*/
-TEST_F(SupplicantStaNetworkHidlTest, RegisterCallback) {
+TEST_P(SupplicantStaNetworkHidlTest, RegisterCallback) {
sta_network_->registerCallback(
new NetworkCallback(), [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -145,7 +166,7 @@
/*
* GetInterfaceName
*/
-TEST_F(SupplicantStaNetworkHidlTest, GetInterfaceName) {
+TEST_P(SupplicantStaNetworkHidlTest, GetInterfaceName) {
const auto& status_and_interface_name =
HIDL_INVOKE(sta_network_, getInterfaceName);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -156,7 +177,7 @@
/*
* GetType
*/
-TEST_F(SupplicantStaNetworkHidlTest, GetType) {
+TEST_P(SupplicantStaNetworkHidlTest, GetType) {
const auto& status_and_interface_type = HIDL_INVOKE(sta_network_, getType);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
status_and_interface_type.first.code);
@@ -167,7 +188,7 @@
/*
* SetGetSsid
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetSsid) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetSsid) {
sta_network_->setSsid(ssid_, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -181,7 +202,7 @@
/*
* SetGetBssid
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetBssid) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetBssid) {
std::array<uint8_t, 6> set_bssid;
memcpy(set_bssid.data(), kTestBssid, set_bssid.size());
sta_network_->setBssid(set_bssid, [](const SupplicantStatus& status) {
@@ -199,7 +220,7 @@
/*
* SetGetKeyMgmt
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetKeyMgmt) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetKeyMgmt) {
sta_network_->setKeyMgmt(kTestKeyMgmt, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -213,7 +234,7 @@
/*
* SetGetProto
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetProto) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetProto) {
sta_network_->setProto(kTestProto, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -226,7 +247,7 @@
/*
* SetGetKeyAuthAlg
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetAuthAlg) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetAuthAlg) {
sta_network_->setAuthAlg(kTestAuthAlg, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
});
@@ -240,7 +261,7 @@
/*
* SetGetGroupCipher
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetGroupCipher) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetGroupCipher) {
sta_network_->setGroupCipher(
kTestGroupCipher, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -255,7 +276,7 @@
/*
* SetGetPairwiseCipher
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetPairwiseCipher) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetPairwiseCipher) {
sta_network_->setPairwiseCipher(
kTestPairwiseCipher, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -270,7 +291,7 @@
/*
* SetGetPskPassphrase
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetPskPassphrase) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetPskPassphrase) {
sta_network_->setPskPassphrase(
kTestPskPassphrase, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -285,7 +306,7 @@
/*
* SetGetPsk
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetPsk) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetPsk) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, setPsk, kTestPsk).code);
const auto& status_and_psk = HIDL_INVOKE(sta_network_, getPsk);
@@ -297,7 +318,7 @@
/*
* SetGetWepKeys
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetWepTxKeyIdx) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetWepTxKeyIdx) {
sta_network_->setWepTxKeyIdx(
kTestWepTxKeyIdx, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -312,7 +333,7 @@
/*
* SetGetWepKeys
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetWepKeys) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetWepKeys) {
for (uint32_t i = 0;
i < static_cast<uint32_t>(
ISupplicantStaNetwork::ParamSizeLimits::WEP_KEYS_MAX_NUM);
@@ -334,7 +355,7 @@
/*
* SetGetScanSsid
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetScanSsid) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetScanSsid) {
sta_network_->setScanSsid(
true, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -349,7 +370,7 @@
/*
* SetGetRequirePmf
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetRequirePmf) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetRequirePmf) {
sta_network_->setRequirePmf(
true, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -364,7 +385,7 @@
/*
* SetGetIdStr
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetIdStr) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetIdStr) {
sta_network_->setIdStr(
kTestIdStr, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -376,11 +397,10 @@
});
}
-
/*
* SetGetEapMethod
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapMethod) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapMethod) {
ISupplicantStaNetwork::EapMethod set_eap_method =
ISupplicantStaNetwork::EapMethod::PEAP;
sta_network_->setEapMethod(
@@ -398,7 +418,7 @@
/*
* SetGetEapPhase2Method
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapPhase2Method) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapPhase2Method) {
ISupplicantStaNetwork::EapMethod set_eap_method =
ISupplicantStaNetwork::EapMethod::PEAP;
sta_network_->setEapMethod(
@@ -422,7 +442,7 @@
/*
* SetGetEapIdentity
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapIdentity) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapIdentity) {
std::vector<uint8_t> set_identity(kTestIdentity, kTestIdentity + sizeof(kTestIdentity));
sta_network_->setEapIdentity(
set_identity, [](const SupplicantStatus& status) {
@@ -438,7 +458,7 @@
/*
* SetGetEapAnonymousIdentity
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapAnonymousIdentity) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapAnonymousIdentity) {
std::vector<uint8_t> set_identity(kTestIdentity, kTestIdentity + sizeof(kTestIdentity));
sta_network_->setEapAnonymousIdentity(
set_identity, [](const SupplicantStatus& status) {
@@ -454,7 +474,7 @@
/*
* SetGetEapPassword
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapPassword) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapPassword) {
std::vector<uint8_t> set_eap_passwd(
kTestEapPasswdStr, kTestEapPasswdStr + strlen(kTestEapPasswdStr));
sta_network_->setEapPassword(
@@ -471,7 +491,7 @@
/*
* SetGetEapCACert
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapCACert) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapCACert) {
sta_network_->setEapCACert(
kTestEapCert, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -486,7 +506,7 @@
/*
* SetGetEapCAPath
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapCAPath) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapCAPath) {
sta_network_->setEapCAPath(
kTestEapCert, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -501,7 +521,7 @@
/*
* SetGetEapClientCert
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapClientCert) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapClientCert) {
sta_network_->setEapClientCert(
kTestEapCert, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -516,7 +536,7 @@
/*
* SetGetEapPrivateKeyId
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapPrivateKeyId) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapPrivateKeyId) {
sta_network_->setEapPrivateKeyId(
kTestEapPrivateKeyId, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -531,7 +551,7 @@
/*
* SetGetEapAltSubjectMatch
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapAltSubjectMatch) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapAltSubjectMatch) {
sta_network_->setEapAltSubjectMatch(
kTestEapMatch, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -546,7 +566,7 @@
/*
* SetGetEapSubjectMatch
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapSubjectMatch) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapSubjectMatch) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, setEapSubjectMatch, kTestEapMatch).code);
@@ -561,7 +581,7 @@
/*
* SetGetEapDomainSuffixMatch
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapDomainSuffixMatch) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapDomainSuffixMatch) {
sta_network_->setEapDomainSuffixMatch(
kTestEapMatch, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -576,7 +596,7 @@
/*
* SetGetEapEngine
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapEngine) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapEngine) {
sta_network_->setEapEngine(
true, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -591,7 +611,7 @@
/*
* SetGetEapEngineID
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetGetEapEngineID) {
+TEST_P(SupplicantStaNetworkHidlTest, SetGetEapEngineID) {
sta_network_->setEapEngineID(
kTestEapEngineID, [](const SupplicantStatus& status) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
@@ -606,7 +626,7 @@
/*
* Enable
*/
-TEST_F(SupplicantStaNetworkHidlTest, Enable) {
+TEST_P(SupplicantStaNetworkHidlTest, Enable) {
// wpa_supplicant doesn't perform any connection initiation
    // unless at least the SSID and key mgmt params are set.
sta_network_->setSsid(ssid_, [](const SupplicantStatus& status) {
@@ -633,7 +653,7 @@
/*
* Disable
*/
-TEST_F(SupplicantStaNetworkHidlTest, Disable) {
+TEST_P(SupplicantStaNetworkHidlTest, Disable) {
// wpa_supplicant doesn't perform any connection initiation
    // unless at least the SSID and key mgmt params are set.
sta_network_->setSsid(ssid_, [](const SupplicantStatus& status) {
@@ -656,7 +676,7 @@
/*
* Select.
*/
-TEST_F(SupplicantStaNetworkHidlTest, Select) {
+TEST_P(SupplicantStaNetworkHidlTest, Select) {
// wpa_supplicant doesn't perform any connection initiation
    // unless at least the SSID and key mgmt params are set.
sta_network_->setSsid(ssid_, [](const SupplicantStatus& status) {
@@ -679,7 +699,7 @@
/*
* SendNetworkEapSimGsmAuthResponse
*/
-TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapSimGsmAuthResponse) {
+TEST_P(SupplicantStaNetworkHidlTest, SendNetworkEapSimGsmAuthResponse) {
std::vector<ISupplicantStaNetwork::NetworkResponseEapSimGsmAuthParams>
params;
ISupplicantStaNetwork::NetworkResponseEapSimGsmAuthParams param;
@@ -695,7 +715,7 @@
/*
* SendNetworkEapSimGsmAuthFailure
*/
-TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapSimGsmAuthFailure) {
+TEST_P(SupplicantStaNetworkHidlTest, SendNetworkEapSimGsmAuthFailure) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, sendNetworkEapSimGsmAuthFailure).code);
}
@@ -703,7 +723,7 @@
/*
* SendNetworkEapSimUmtsAuthResponse
*/
-TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapSimUmtsAuthResponse) {
+TEST_P(SupplicantStaNetworkHidlTest, SendNetworkEapSimUmtsAuthResponse) {
ISupplicantStaNetwork::NetworkResponseEapSimUmtsAuthParams params;
params.res = std::vector<uint8_t>(kTestRes, kTestRes + sizeof(kTestRes));
memcpy(params.ik.data(), kTestIk, params.ik.size());
@@ -717,7 +737,7 @@
/*
* SendNetworkEapSimUmtsAuthFailure
*/
-TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapSimUmtsAuthFailure) {
+TEST_P(SupplicantStaNetworkHidlTest, SendNetworkEapSimUmtsAuthFailure) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, sendNetworkEapSimUmtsAuthFailure).code);
}
@@ -725,7 +745,7 @@
/*
* SendNetworkEapSimUmtsAutsResponse
*/
-TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapSimUmtsAutsResponse) {
+TEST_P(SupplicantStaNetworkHidlTest, SendNetworkEapSimUmtsAutsResponse) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, sendNetworkEapSimUmtsAutsResponse,
kTestAutParam)
@@ -735,7 +755,7 @@
/*
* SendNetworkEapIdentityResponse
*/
-TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapIdentityResponse) {
+TEST_P(SupplicantStaNetworkHidlTest, SendNetworkEapIdentityResponse) {
sta_network_->sendNetworkEapIdentityResponse(
std::vector<uint8_t>(kTestIdentity,
kTestIdentity + sizeof(kTestIdentity)),
@@ -747,7 +767,7 @@
/*
* SetUpdateIdentifier
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetUpdateIdentifier) {
+TEST_P(SupplicantStaNetworkHidlTest, SetUpdateIdentifier) {
EXPECT_EQ(
SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, setUpdateIdentifier, kTestUpdateIdentifier)
@@ -757,7 +777,7 @@
/*
* SetProactiveKeyCaching
*/
-TEST_F(SupplicantStaNetworkHidlTest, SetProactiveKeyCaching) {
+TEST_P(SupplicantStaNetworkHidlTest, SetProactiveKeyCaching) {
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, setProactiveKeyCaching, true).code);
EXPECT_EQ(SupplicantStatusCode::SUCCESS,
@@ -767,7 +787,7 @@
/*
* GetWpsNfcConfigurationToken
*/
-TEST_F(SupplicantStaNetworkHidlTest, GetWpsNfcConfigurationToken) {
+TEST_P(SupplicantStaNetworkHidlTest, GetWpsNfcConfigurationToken) {
ASSERT_EQ(SupplicantStatusCode::SUCCESS,
HIDL_INVOKE(sta_network_, setSsid, ssid_).code);
ASSERT_EQ(SupplicantStatusCode::SUCCESS,
@@ -780,3 +800,12 @@
EXPECT_EQ(SupplicantStatusCode::SUCCESS, status_and_token.first.code);
EXPECT_FALSE(0 == status_and_token.second.size());
}
+
+INSTANTIATE_TEST_CASE_P(
+ PerInstance, SupplicantStaNetworkHidlTest,
+ testing::Combine(
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ISupplicant::descriptor))),
+ android::hardware::PrintInstanceTupleNameToString<>);
\ No newline at end of file
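
Note: newer googletest releases deprecate INSTANTIATE_TEST_CASE_P in favor of INSTANTIATE_TEST_SUITE_P; migrating these instantiations is a pure rename, e.g.:

    INSTANTIATE_TEST_SUITE_P(
        PerInstance, SupplicantStaNetworkHidlTest,
        testing::Combine(
            testing::ValuesIn(
                android::hardware::getAllHalInstanceNames(IWifi::descriptor)),
            testing::ValuesIn(android::hardware::getAllHalInstanceNames(
                ISupplicant::descriptor))),
        android::hardware::PrintInstanceTupleNameToString<>);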