Merge "vndk vintf vibrator"
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 98e125b..0e18f48 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -1,6 +1,12 @@
{
"presubmit": [
{
+ "name": "vts_treble_vintf_framework_test"
+ },
+ {
+ "name": "vts_treble_vintf_vendor_test"
+ },
+ {
"name": "hidl_implementation_test"
}
]
diff --git a/audio/6.0/IDevice.hal b/audio/6.0/IDevice.hal
index e885fe2..122c550 100644
--- a/audio/6.0/IDevice.hal
+++ b/audio/6.0/IDevice.hal
@@ -280,4 +280,19 @@
*/
setConnectedState(DeviceAddress address, bool connected)
generates (Result retval);
+
+ /**
+ * Called by the framework to deinitialize the device and free up
+ * all currently allocated resources. It is recommended to close
+ * the device on the client side as soon as it becomes unused.
+ *
+ * Note that all streams must be closed by the client before
+ * attempting to close the device they belong to.
+ *
+ * @return retval OK in case of success.
+ * INVALID_STATE if the device was already closed
+ * or there are streams currently opened.
+ */
+ @exit
+ close() generates (Result retval);
};
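
For context, a minimal client-side sketch of the ordering this new close() contract imposes. The helper and all names below are illustrative, not part of this patch; it assumes the @6.0 audio HAL types (IDevice, IStreamOut, Result) are in scope.

    // Sketch only: every stream must be closed before IDevice::close() succeeds.
    Result shutdownDevice(const sp<IDevice>& device, std::vector<sp<IStreamOut>>& streams) {
        for (auto& stream : streams) {
            stream->close();  // otherwise device->close() below returns INVALID_STATE
            stream.clear();
        }
        streams.clear();
        Return<Result> ret = device->close();
        // NOT_INITIALIZED as a transport-error fallback is illustrative only.
        return ret.isOk() ? static_cast<Result>(ret) : Result::NOT_INITIALIZED;
    }
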
diff --git a/audio/6.0/IStream.hal b/audio/6.0/IStream.hal
index 451e116..d7d3c84 100644
--- a/audio/6.0/IStream.hal
+++ b/audio/6.0/IStream.hal
@@ -277,7 +277,7 @@
 * @return retval OK in case of success.
* NOT_SUPPORTED on non mmap mode streams
* NOT_INITIALIZED in case of memory allocation error
- * INVALID_ARGUMENTS if the requested buffer size is too large
+ * INVALID_ARGUMENTS if the requested buffer size is invalid
* INVALID_STATE if called out of sequence
 * @return info an MmapBufferInfo struct containing information on the MMAP buffer created.
*/
@@ -300,13 +300,17 @@
/**
* Called by the framework to deinitialize the stream and free up
- * all the currently allocated resources. It is recommended to close
+ * all currently allocated resources. It is recommended to close
 * the stream on the client side as soon as it becomes unused.
*
+ * The client must ensure that this function is not called while
+ * audio data is being transferred through the stream's message queues.
+ *
 * @return retval OK in case of success.
* NOT_SUPPORTED if called on IStream instead of input or
* output stream interface.
* INVALID_STATE if the stream was already closed.
*/
+ @exit
close() generates (Result retval);
};
diff --git a/audio/common/all-versions/test/utility/include/utility/PrettyPrintAudioTypes.h b/audio/common/all-versions/test/utility/include/utility/PrettyPrintAudioTypes.h
deleted file mode 100644
index 3833fd0..0000000
--- a/audio/common/all-versions/test/utility/include/utility/PrettyPrintAudioTypes.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_AUDIO_COMMON_TEST_UTILITY_PRETTY_PRINT_AUDIO_TYPES_H
-#define ANDROID_HARDWARE_AUDIO_COMMON_TEST_UTILITY_PRETTY_PRINT_AUDIO_TYPES_H
-
-#include <iosfwd>
-#include <utility>
-
-/** @file Use HIDL generated toString methods to pretty print gtest errors
- * Unfortunately Gtest does not offer a template to specialize, only
- * overloading PrintTo.
- * @note that this overload can NOT be template because
- * the fallback is already template, resulting in ambiguity.
- * @note that the overload MUST be in the exact namespace
- * the type is declared in, as per the ADL rules.
- */
-
-namespace android {
-namespace hardware {
-namespace audio {
-
-#define DEFINE_GTEST_PRINT_TO(T) \
- inline void PrintTo(const T& val, ::std::ostream* os) { *os << toString(val); }
-
-namespace CPP_VERSION {
-DEFINE_GTEST_PRINT_TO(IPrimaryDevice::TtyMode)
-DEFINE_GTEST_PRINT_TO(Result)
-} // namespace CPP_VERSION
-
-namespace common {
-namespace CPP_VERSION {
-DEFINE_GTEST_PRINT_TO(AudioConfig)
-DEFINE_GTEST_PRINT_TO(AudioMode)
-DEFINE_GTEST_PRINT_TO(AudioDevice)
-DEFINE_GTEST_PRINT_TO(AudioFormat)
-DEFINE_GTEST_PRINT_TO(AudioChannelMask)
-} // namespace CPP_VERSION
-} // namespace common
-
-#undef DEFINE_GTEST_PRINT_TO
-
-} // namespace audio
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_AUDIO_COMMON_TEST_UTILITY_PRETTY_PRINT_AUDIO_TYPES_H
diff --git a/audio/core/all-versions/default/Device.cpp b/audio/core/all-versions/default/Device.cpp
index 1a9df21..21dab00 100644
--- a/audio/core/all-versions/default/Device.cpp
+++ b/audio/core/all-versions/default/Device.cpp
@@ -39,11 +39,10 @@
using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
-Device::Device(audio_hw_device_t* device) : mDevice(device) {}
+Device::Device(audio_hw_device_t* device) : mIsClosed(false), mDevice(device) {}
Device::~Device() {
- int status = audio_hw_device_close(mDevice);
- ALOGW_IF(status, "Error closing audio hw device %p: %s", mDevice, strerror(-status));
+ (void)doClose();
mDevice = nullptr;
}
@@ -54,10 +53,14 @@
void Device::closeInputStream(audio_stream_in_t* stream) {
mDevice->close_input_stream(mDevice, stream);
+ LOG_ALWAYS_FATAL_IF(mOpenedStreamsCount == 0, "mOpenedStreamsCount is already 0");
+ --mOpenedStreamsCount;
}
void Device::closeOutputStream(audio_stream_out_t* stream) {
mDevice->close_output_stream(mDevice, stream);
+ LOG_ALWAYS_FATAL_IF(mOpenedStreamsCount == 0, "mOpenedStreamsCount is already 0");
+ --mOpenedStreamsCount;
}
char* Device::halGetParameters(const char* keys) {
@@ -159,6 +162,7 @@
sp<IStreamOut> streamOut;
if (status == OK) {
streamOut = new StreamOut(this, halStream);
+ ++mOpenedStreamsCount;
}
HidlUtils::audioConfigFromHal(halConfig, suggestedConfig);
return {analyzeStatus("open_output_stream", status, {EINVAL} /*ignore*/), streamOut};
@@ -185,6 +189,7 @@
sp<IStreamIn> streamIn;
if (status == OK) {
streamIn = new StreamIn(this, halStream);
+ ++mOpenedStreamsCount;
}
HidlUtils::audioConfigFromHal(halConfig, suggestedConfig);
return {analyzeStatus("open_input_stream", status, {EINVAL} /*ignore*/), streamIn};
@@ -383,6 +388,18 @@
}
#endif
+Result Device::doClose() {
+ if (mIsClosed || mOpenedStreamsCount != 0) return Result::INVALID_STATE;
+ mIsClosed = true;
+ return analyzeStatus("close", audio_hw_device_close(mDevice));
+}
+
+#if MAJOR_VERSION >= 6
+Return<Result> Device::close() {
+ return doClose();
+}
+#endif
+
} // namespace implementation
} // namespace CPP_VERSION
} // namespace audio
diff --git a/audio/core/all-versions/default/PrimaryDevice.cpp b/audio/core/all-versions/default/PrimaryDevice.cpp
index 99590b0..3cf0932 100644
--- a/audio/core/all-versions/default/PrimaryDevice.cpp
+++ b/audio/core/all-versions/default/PrimaryDevice.cpp
@@ -31,7 +31,11 @@
PrimaryDevice::PrimaryDevice(audio_hw_device_t* device) : mDevice(new Device(device)) {}
-PrimaryDevice::~PrimaryDevice() {}
+PrimaryDevice::~PrimaryDevice() {
+ // Do not call mDevice->close here. If there are any unclosed streams,
+ // they only hold an IDevice instance, not IPrimaryDevice; thus the
+ // IPrimaryDevice "part" of a device can be destroyed before the streams.
+}
// Methods from ::android::hardware::audio::CPP_VERSION::IDevice follow.
Return<Result> PrimaryDevice::initCheck() {
@@ -160,6 +164,11 @@
return mDevice->setConnectedState(address, connected);
}
#endif
+#if MAJOR_VERSION >= 6
+Return<Result> PrimaryDevice::close() {
+ return mDevice->close();
+}
+#endif
// Methods from ::android::hardware::audio::CPP_VERSION::IPrimaryDevice follow.
Return<Result> PrimaryDevice::setVoiceVolume(float volume) {
diff --git a/audio/core/all-versions/default/StreamIn.cpp b/audio/core/all-versions/default/StreamIn.cpp
index d316f83..f1152ca 100644
--- a/audio/core/all-versions/default/StreamIn.cpp
+++ b/audio/core/all-versions/default/StreamIn.cpp
@@ -139,8 +139,7 @@
} // namespace
StreamIn::StreamIn(const sp<Device>& device, audio_stream_in_t* stream)
- : mIsClosed(false),
- mDevice(device),
+ : mDevice(device),
mStream(stream),
mStreamCommon(new Stream(&stream->common)),
mStreamMmap(new StreamMmap<audio_stream_in_t>(stream)),
@@ -159,7 +158,9 @@
status_t status = EventFlag::deleteEventFlag(&mEfGroup);
ALOGE_IF(status, "read MQ event flag deletion error: %s", strerror(-status));
}
+#if MAJOR_VERSION <= 5
mDevice->closeInputStream(mStream);
+#endif
mStream = nullptr;
}
@@ -303,14 +304,16 @@
}
Return<Result> StreamIn::close() {
- if (mIsClosed) return Result::INVALID_STATE;
- mIsClosed = true;
- if (mReadThread.get()) {
- mStopReadThread.store(true, std::memory_order_release);
+ if (mStopReadThread.load(std::memory_order_relaxed)) { // only this thread writes
+ return Result::INVALID_STATE;
}
+ mStopReadThread.store(true, std::memory_order_release);
if (mEfGroup) {
mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
}
+#if MAJOR_VERSION >= 6
+ mDevice->closeInputStream(mStream);
+#endif
return Result::OK;
}
diff --git a/audio/core/all-versions/default/StreamOut.cpp b/audio/core/all-versions/default/StreamOut.cpp
index 82cc408..396d354 100644
--- a/audio/core/all-versions/default/StreamOut.cpp
+++ b/audio/core/all-versions/default/StreamOut.cpp
@@ -138,8 +138,7 @@
} // namespace
StreamOut::StreamOut(const sp<Device>& device, audio_stream_out_t* stream)
- : mIsClosed(false),
- mDevice(device),
+ : mDevice(device),
mStream(stream),
mStreamCommon(new Stream(&stream->common)),
mStreamMmap(new StreamMmap<audio_stream_out_t>(stream)),
@@ -148,7 +147,7 @@
StreamOut::~StreamOut() {
ATRACE_CALL();
- close();
+ (void)close();
if (mWriteThread.get()) {
ATRACE_NAME("mWriteThread->join");
status_t status = mWriteThread->join();
@@ -159,10 +158,12 @@
ALOGE_IF(status, "write MQ event flag deletion error: %s", strerror(-status));
}
mCallback.clear();
+#if MAJOR_VERSION <= 5
mDevice->closeOutputStream(mStream);
// Closing the output stream in the HAL waits for the callback to finish,
 // and joins the callback thread. Thus it is guaranteed that the callback
// thread will not be accessing our object anymore.
+#endif
mStream = nullptr;
}
@@ -291,14 +292,16 @@
#endif
Return<Result> StreamOut::close() {
- if (mIsClosed) return Result::INVALID_STATE;
- mIsClosed = true;
- if (mWriteThread.get()) {
- mStopWriteThread.store(true, std::memory_order_release);
+ if (mStopWriteThread.load(std::memory_order_relaxed)) { // only this thread writes
+ return Result::INVALID_STATE;
}
+ mStopWriteThread.store(true, std::memory_order_release);
if (mEfGroup) {
mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
}
+#if MAJOR_VERSION >= 6
+ mDevice->closeOutputStream(mStream);
+#endif
return Result::OK;
}
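
The rewritten StreamIn/StreamOut close() above folds the old mIsClosed flag into the thread-stop atomic. A standalone sketch of that close-once idiom, independent of any HAL types:

    #include <atomic>

    // The "stop" flag doubles as the "already closed" marker, so no separate
    // mIsClosed bool is needed. Only the thread calling close() ever stores to
    // the flag, so a relaxed load suffices for the double-close check; the
    // release store publishes "stopped" to the worker thread, which pairs it
    // with an acquire load.
    struct Closer {
        std::atomic<bool> mStop{false};
        bool close() {
            if (mStop.load(std::memory_order_relaxed)) return false;  // second close
            mStop.store(true, std::memory_order_release);
            return true;  // first close wins
        }
    };
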
diff --git a/audio/core/all-versions/default/include/core/default/Device.h b/audio/core/all-versions/default/include/core/default/Device.h
index e64f00f..11ab607 100644
--- a/audio/core/all-versions/default/include/core/default/Device.h
+++ b/audio/core/all-versions/default/include/core/default/Device.h
@@ -114,6 +114,9 @@
Return<void> getMicrophones(getMicrophones_cb _hidl_cb) override;
Return<Result> setConnectedState(const DeviceAddress& address, bool connected) override;
#endif
+#if MAJOR_VERSION >= 6
+ Return<Result> close() override;
+#endif
Return<void> debug(const hidl_handle& fd, const hidl_vec<hidl_string>& options) override;
@@ -124,11 +127,15 @@
void closeOutputStream(audio_stream_out_t* stream);
audio_hw_device_t* device() const { return mDevice; }
- private:
+ private:
+ bool mIsClosed;
audio_hw_device_t* mDevice;
+ int mOpenedStreamsCount = 0;
virtual ~Device();
+ Result doClose();
+
// Methods from ParametersUtil.
char* halGetParameters(const char* keys) override;
int halSetParameters(const char* keysAndValues) override;
diff --git a/audio/core/all-versions/default/include/core/default/PrimaryDevice.h b/audio/core/all-versions/default/include/core/default/PrimaryDevice.h
index 9d69cb0..f5f3848 100644
--- a/audio/core/all-versions/default/include/core/default/PrimaryDevice.h
+++ b/audio/core/all-versions/default/include/core/default/PrimaryDevice.h
@@ -96,6 +96,9 @@
Return<void> getMicrophones(getMicrophones_cb _hidl_cb) override;
Return<Result> setConnectedState(const DeviceAddress& address, bool connected) override;
#endif
+#if MAJOR_VERSION >= 6
+ Return<Result> close() override;
+#endif
Return<void> debug(const hidl_handle& fd, const hidl_vec<hidl_string>& options) override;
diff --git a/audio/core/all-versions/default/include/core/default/StreamIn.h b/audio/core/all-versions/default/include/core/default/StreamIn.h
index 6209b8f..24f9944 100644
--- a/audio/core/all-versions/default/include/core/default/StreamIn.h
+++ b/audio/core/all-versions/default/include/core/default/StreamIn.h
@@ -120,7 +120,6 @@
uint64_t* time);
private:
- bool mIsClosed;
const sp<Device> mDevice;
audio_stream_in_t* mStream;
const sp<Stream> mStreamCommon;
diff --git a/audio/core/all-versions/default/include/core/default/StreamOut.h b/audio/core/all-versions/default/include/core/default/StreamOut.h
index b098005..6334785 100644
--- a/audio/core/all-versions/default/include/core/default/StreamOut.h
+++ b/audio/core/all-versions/default/include/core/default/StreamOut.h
@@ -126,7 +126,6 @@
TimeSpec* timeStamp);
private:
- bool mIsClosed;
const sp<Device> mDevice;
audio_stream_out_t* mStream;
const sp<Stream> mStreamCommon;
diff --git a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
index e267a5e..709b7cd 100644
--- a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
@@ -22,18 +22,16 @@
GTEST_SKIP() << "No primary device on this factory"; // returns
}
- struct WaitExecutor {
- ~WaitExecutor() { DeviceManager::waitForInstanceDestruction(); }
- } waitExecutor; // Make sure we wait for the device destruction on exiting from the test.
- Result result;
- sp<IDevice> baseDevice;
- ASSERT_OK(getDevicesFactory()->openDevice("primary", returnIn(result, baseDevice)));
- ASSERT_OK(result);
- ASSERT_TRUE(baseDevice != nullptr);
-
- Return<sp<IPrimaryDevice>> primaryDevice = IPrimaryDevice::castFrom(baseDevice);
- ASSERT_TRUE(primaryDevice.isOk());
- ASSERT_TRUE(sp<IPrimaryDevice>(primaryDevice) != nullptr);
+ { // Scope for device SPs
+ sp<IDevice> baseDevice =
+ DeviceManager::getInstance().get(getFactoryName(), DeviceManager::kPrimaryDevice);
+ ASSERT_TRUE(baseDevice != nullptr);
+ Return<sp<IPrimaryDevice>> primaryDevice = IPrimaryDevice::castFrom(baseDevice);
+ EXPECT_TRUE(primaryDevice.isOk());
+ EXPECT_TRUE(sp<IPrimaryDevice>(primaryDevice) != nullptr);
+ }
+ EXPECT_TRUE(
+ DeviceManager::getInstance().reset(getFactoryName(), DeviceManager::kPrimaryDevice));
}
//////////////////////////////////////////////////////////////////////////////
@@ -54,7 +52,6 @@
doc::test(
"Make sure getMicrophones always succeeds"
"and getActiveMicrophones always succeeds when recording from these microphones.");
- AudioIoHandle ioHandle = (AudioIoHandle)AudioHandleConsts::AUDIO_IO_HANDLE_NONE;
AudioConfig config{};
config.channelMask = mkEnumBitfield(AudioChannelMask::IN_MONO);
config.sampleRateHz = 8000;
@@ -67,18 +64,14 @@
continue;
}
sp<IStreamIn> stream;
+ StreamHelper<IStreamIn> helper(stream);
AudioConfig suggestedConfig{};
- ASSERT_OK(getDevice()->openInputStream(ioHandle, microphone.deviceAddress, config,
- flags, initMetadata,
- returnIn(res, stream, suggestedConfig)));
- if (res != Result::OK) {
- ASSERT_TRUE(stream == nullptr);
- AudioConfig suggestedConfigRetry{};
- ASSERT_OK(getDevice()->openInputStream(
- ioHandle, microphone.deviceAddress, suggestedConfig, flags, initMetadata,
- returnIn(res, stream, suggestedConfigRetry)));
- }
- ASSERT_OK(res);
+ ASSERT_NO_FATAL_FAILURE(helper.open(
+ [&](AudioIoHandle handle, AudioConfig config, auto cb) {
+ return getDevice()->openInputStream(handle, microphone.deviceAddress,
+ config, flags, initMetadata, cb);
+ },
+ config, &res, &suggestedConfig));
hidl_vec<MicrophoneInfo> activeMicrophones;
Result readRes;
typedef MessageQueue<IStreamIn::ReadParameters, kSynchronizedReadWrite> CommandMQ;
@@ -112,11 +105,8 @@
ASSERT_OK(res);
ASSERT_NE(0U, activeMicrophones.size());
}
- stream->close();
- // Workaround for b/139329877. Ensures the stream gets closed on the audio hal side.
- stream.clear();
- IPCThreadState::self()->flushCommands();
- usleep(1000);
+ helper.close(true /*clear*/, &res);
+ ASSERT_OK(res);
if (efGroup) {
EventFlag::deleteEventFlag(&efGroup);
}
diff --git a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
index 30f8a7a..22e60be 100644
--- a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
@@ -100,7 +100,8 @@
special = true;
}
if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) &&
- !(flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC)) {
+ !(flags &
+ (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ))) {
result.emplace_back(device, config,
AudioOutputFlag(AUDIO_OUTPUT_FLAG_DIRECT));
special = true;
@@ -144,3 +145,49 @@
}();
return parameters;
}
+
+TEST_P(AudioHidlDeviceTest, CloseDeviceWithOpenedOutputStreams) {
+ doc::test("Verify that a device can't be closed if there are streams opened");
+ DeviceAddress address{.device = AudioDevice::OUT_DEFAULT};
+ AudioConfig config{};
+ auto flags = hidl_bitfield<AudioOutputFlag>(AudioOutputFlag::NONE);
+ SourceMetadata initMetadata = {{{AudioUsage::MEDIA, AudioContentType::MUSIC, 1 /* gain */}}};
+ sp<IStreamOut> stream;
+ StreamHelper<IStreamOut> helper(stream);
+ AudioConfig suggestedConfig{};
+ ASSERT_NO_FATAL_FAILURE(helper.open(
+ [&](AudioIoHandle handle, AudioConfig config, auto cb) {
+ return getDevice()->openOutputStream(handle, address, config, flags, initMetadata,
+ cb);
+ },
+ config, &res, &suggestedConfig));
+ ASSERT_RESULT(Result::INVALID_STATE, getDevice()->close());
+ ASSERT_NO_FATAL_FAILURE(helper.close(true /*clear*/, &res));
+ ASSERT_OK(getDevice()->close());
+ ASSERT_TRUE(resetDevice());
+}
+
+TEST_P(AudioHidlDeviceTest, CloseDeviceWithOpenedInputStreams) {
+ doc::test("Verify that a device can't be closed if there are streams opened");
+ auto module = getCachedPolicyConfig().getModuleFromName(getDeviceName());
+ if (module->getInputProfiles().empty()) {
+ GTEST_SKIP() << "Device doesn't have input profiles";
+ }
+ DeviceAddress address{.device = AudioDevice::IN_DEFAULT};
+ AudioConfig config{};
+ auto flags = hidl_bitfield<AudioInputFlag>(AudioInputFlag::NONE);
+ SinkMetadata initMetadata = {{{.source = AudioSource::MIC, .gain = 1}}};
+ sp<IStreamIn> stream;
+ StreamHelper<IStreamIn> helper(stream);
+ AudioConfig suggestedConfig{};
+ ASSERT_NO_FATAL_FAILURE(helper.open(
+ [&](AudioIoHandle handle, AudioConfig config, auto cb) {
+ return getDevice()->openInputStream(handle, address, config, flags, initMetadata,
+ cb);
+ },
+ config, &res, &suggestedConfig));
+ ASSERT_RESULT(Result::INVALID_STATE, getDevice()->close());
+ ASSERT_NO_FATAL_FAILURE(helper.close(true /*clear*/, &res));
+ ASSERT_OK(getDevice()->close());
+ ASSERT_TRUE(resetDevice());
+}
diff --git a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
index 468f9b2..d0d39e8 100644
--- a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
+++ b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
@@ -58,7 +58,6 @@
#include "utility/AssertOk.h"
#include "utility/Documentation.h"
-#include "utility/PrettyPrintAudioTypes.h"
#include "utility/ReturnIn.h"
#include "utility/ValidateXml.h"
@@ -719,12 +718,6 @@
::testing::Values(AudioInputFlag::NONE)),
&DeviceConfigParameterToString);
INSTANTIATE_TEST_CASE_P(
- SupportedInputBufferSize, RequiredInputBufferSizeTest,
- ::testing::Combine(::testing::ValuesIn(getDeviceParameters()),
- ::testing::ValuesIn(ConfigHelper::getSupportedCaptureAudioConfig()),
- ::testing::Values(AudioInputFlag::NONE)),
- &DeviceConfigParameterToString);
-INSTANTIATE_TEST_CASE_P(
RecommendedCaptureAudioConfigSupport, OptionalInputBufferSizeTest,
::testing::Combine(
::testing::ValuesIn(getDeviceParametersForPrimaryDeviceTests()),
@@ -816,60 +809,84 @@
////////////////////////// open{Output,Input}Stream //////////////////////////
//////////////////////////////////////////////////////////////////////////////
+// This class is also used by some device tests.
template <class Stream>
-class OpenStreamTest : public AudioHidlTestWithDeviceConfigParameter {
- protected:
+class StreamHelper {
+ public:
+ // StreamHelper doesn't own the stream; this keeps stream lifetime management simple.
+ explicit StreamHelper(sp<Stream>& stream) : mStream(stream) {}
template <class Open>
- void testOpen(Open openStream, const AudioConfig& config) {
+ void open(Open openStream, const AudioConfig& config, Result* res,
+ AudioConfig* suggestedConfigPtr) {
// FIXME: Open a stream without an IOHandle
// This is not required to be accepted by hal implementations
AudioIoHandle ioHandle = (AudioIoHandle)AudioHandleConsts::AUDIO_IO_HANDLE_NONE;
AudioConfig suggestedConfig{};
- ASSERT_OK(openStream(ioHandle, config, returnIn(res, stream, suggestedConfig)));
-
- // TODO: only allow failure for RecommendedPlaybackAudioConfig
- switch (res) {
+ bool retryWithSuggestedConfig = true;
+ if (suggestedConfigPtr == nullptr) {
+ suggestedConfigPtr = &suggestedConfig;
+ retryWithSuggestedConfig = false;
+ }
+ ASSERT_OK(openStream(ioHandle, config, returnIn(*res, mStream, *suggestedConfigPtr)));
+ switch (*res) {
case Result::OK:
- ASSERT_TRUE(stream != nullptr);
- audioConfig = config;
+ ASSERT_TRUE(mStream != nullptr);
+ *suggestedConfigPtr = config;
break;
case Result::INVALID_ARGUMENTS:
- ASSERT_TRUE(stream == nullptr);
- AudioConfig suggestedConfigRetry;
- // Could not open stream with config, try again with the
- // suggested one
- ASSERT_OK(openStream(ioHandle, suggestedConfig,
- returnIn(res, stream, suggestedConfigRetry)));
- // This time it must succeed
- ASSERT_OK(res);
- ASSERT_TRUE(stream != nullptr);
- audioConfig = suggestedConfig;
+ ASSERT_TRUE(mStream == nullptr);
+ if (retryWithSuggestedConfig) {
+ AudioConfig suggestedConfigRetry;
+ ASSERT_OK(openStream(ioHandle, *suggestedConfigPtr,
+ returnIn(*res, mStream, suggestedConfigRetry)));
+ ASSERT_OK(*res);
+ ASSERT_TRUE(mStream != nullptr);
+ }
break;
default:
- FAIL() << "Invalid return status: " << ::testing::PrintToString(res);
+ FAIL() << "Invalid return status: " << ::testing::PrintToString(*res);
}
+ }
+ void close(bool clear, Result* res) {
+ auto ret = mStream->close();
+ EXPECT_TRUE(ret.isOk());
+ *res = ret;
+ if (clear) {
+ mStream.clear();
+#if MAJOR_VERSION <= 5
+ // FIXME: there is no way to know when the remote IStream is being destroyed
+ // Binder does not support testing if an object is alive, thus
+ // wait for 100ms to let the binder destruction propagate and
+ // give the remote device time to be destroyed.
+ // flushCommands makes sure all local commands are sent, which should
+ // reduce the latency between local and remote destruction.
+ IPCThreadState::self()->flushCommands();
+ usleep(100 * 1000);
+#endif
+ }
+ }
+
+ private:
+ sp<Stream>& mStream;
+};
+
+template <class Stream>
+class OpenStreamTest : public AudioHidlTestWithDeviceConfigParameter {
+ protected:
+ OpenStreamTest() : AudioHidlTestWithDeviceConfigParameter(), helper(stream) {}
+ template <class Open>
+ void testOpen(Open openStream, const AudioConfig& config) {
+ // TODO: only allow failure for RecommendedPlaybackAudioConfig
+ ASSERT_NO_FATAL_FAILURE(helper.open(openStream, config, &res, &audioConfig));
open = true;
}
- Return<Result> closeStream() {
+ Result closeStream(bool clear = true) {
open = false;
- auto res = stream->close();
- stream.clear();
- waitForStreamDestruction();
+ helper.close(clear, &res);
return res;
}
- static void waitForStreamDestruction() {
- // FIXME: there is no way to know when the remote IStream is being destroyed
- // Binder does not support testing if an object is alive, thus
- // wait for 100ms to let the binder destruction propagates and
- // the remote device has the time to be destroyed.
- // flushCommand makes sure all local command are sent, thus should reduce
- // the latency between local and remote destruction.
- IPCThreadState::self()->flushCommands();
- usleep(100 * 1000);
- }
-
private:
void TearDown() override {
if (open) {
@@ -882,6 +899,7 @@
AudioConfig audioConfig;
DeviceAddress address = {};
sp<Stream> stream;
+ StreamHelper<Stream> helper;
bool open = false;
};
@@ -930,12 +948,6 @@
::testing::Values(AudioOutputFlag::NONE)),
&DeviceConfigParameterToString);
INSTANTIATE_TEST_CASE_P(
- SupportedOutputStreamConfig, OutputStreamTest,
- ::testing::Combine(::testing::ValuesIn(getDeviceParameters()),
- ::testing::ValuesIn(ConfigHelper::getSupportedPlaybackAudioConfig()),
- ::testing::Values(AudioOutputFlag::NONE)),
- &DeviceConfigParameterToString);
-INSTANTIATE_TEST_CASE_P(
RecommendedOutputStreamConfigSupport, OutputStreamTest,
::testing::Combine(
::testing::ValuesIn(getDeviceParametersForPrimaryDeviceTests()),
@@ -945,7 +957,7 @@
#elif MAJOR_VERSION >= 6
 // For V6 and above, test according to the audio policy manager configuration.
 // This is more correct, as the CDD is written from the apps' perspective.
-// Audio system provides necessary format conversions for the missing configurations.
+// Audio system provides necessary format conversions for missing configurations.
INSTANTIATE_TEST_CASE_P(DeclaredOutputStreamConfigSupport, OutputStreamTest,
::testing::ValuesIn(getOutputDeviceConfigParameters()),
&DeviceConfigParameterToString);
@@ -991,12 +1003,6 @@
::testing::Values(AudioInputFlag::NONE)),
&DeviceConfigParameterToString);
INSTANTIATE_TEST_CASE_P(
- SupportedInputStreamConfig, InputStreamTest,
- ::testing::Combine(::testing::ValuesIn(getDeviceParameters()),
- ::testing::ValuesIn(ConfigHelper::getSupportedCaptureAudioConfig()),
- ::testing::Values(AudioInputFlag::NONE)),
- &DeviceConfigParameterToString);
-INSTANTIATE_TEST_CASE_P(
RecommendedInputStreamConfigSupport, InputStreamTest,
::testing::Combine(
::testing::ValuesIn(getDeviceParametersForPrimaryDeviceTests()),
@@ -1006,7 +1012,7 @@
#elif MAJOR_VERSION >= 6
 // For V6 and above, test according to the audio policy manager configuration.
 // This is more correct, as the CDD is written from the apps' perspective.
-// Audio system provides necessary format conversions for the missing configurations.
+// Audio system provides necessary format conversions for missing configurations.
INSTANTIATE_TEST_CASE_P(DeclaredInputStreamConfigSupport, InputStreamTest,
::testing::ValuesIn(getInputDeviceConfigParameters()),
&DeviceConfigParameterToString);
@@ -1195,26 +1201,21 @@
TEST_IO_STREAM(close, "Make sure a stream can be closed", ASSERT_OK(closeStream()))
// clang-format off
TEST_IO_STREAM(closeTwice, "Make sure a stream can not be closed twice",
- auto streamCopy = stream;
- ASSERT_OK(closeStream());
- ASSERT_RESULT(Result::INVALID_STATE, streamCopy->close());
- streamCopy.clear();
- waitForStreamDestruction())
+ ASSERT_OK(closeStream(false /*clear*/));
+ ASSERT_EQ(Result::INVALID_STATE, closeStream()))
// clang-format on
-static void testCreateTooBigMmapBuffer(IStream* stream) {
- MmapBufferInfo info;
- Result res;
- // Assume that int max is a value too big to be allocated
- // This is true currently with a 32bit media server, but might not when it
- // will run in 64 bit
- auto minSizeFrames = std::numeric_limits<int32_t>::max();
- ASSERT_OK(stream->createMmapBuffer(minSizeFrames, returnIn(res, info)));
- ASSERT_RESULT(invalidArgsOrNotSupported, res);
+static void testMmapBufferOfInvalidSize(IStream* stream) {
+ for (int32_t value : {-1, 0, std::numeric_limits<int32_t>::max()}) {
+ MmapBufferInfo info;
+ Result res;
+ EXPECT_OK(stream->createMmapBuffer(value, returnIn(res, info)));
+ EXPECT_RESULT(invalidArgsOrNotSupported, res) << "value=" << value;
+ }
}
-TEST_IO_STREAM(CreateTooBigMmapBuffer, "Create mmap buffer too big should fail",
- testCreateTooBigMmapBuffer(stream.get()))
+TEST_IO_STREAM(CreateTooBigMmapBuffer, "Create mmap buffer of invalid size must fail",
+ testMmapBufferOfInvalidSize(stream.get()))
static void testGetMmapPositionOfNonMmapedStream(IStream* stream) {
Result res;
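
One subtlety in the StreamHelper::open() added above: a null suggestedConfigPtr disables the retry-with-suggested-config path. A sketch of the two call shapes, assuming helper, config, res, and an openFn lambda as in the tests:

    AudioConfig suggested{};
    // Shape 1, with an out-parameter: on INVALID_ARGUMENTS, open() retries with
    // the config suggested by the HAL and asserts that the retry succeeds.
    helper.open(openFn, config, &res, &suggested);
    // Shape 2, with nullptr: no retry; INVALID_ARGUMENTS is reported via res.
    helper.open(openFn, config, &res, nullptr);
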
diff --git a/audio/core/all-versions/vts/functional/ConfigHelper.h b/audio/core/all-versions/vts/functional/ConfigHelper.h
index 48aae8c..a2f4116 100644
--- a/audio/core/all-versions/vts/functional/ConfigHelper.h
+++ b/audio/core/all-versions/vts/functional/ConfigHelper.h
@@ -57,12 +57,6 @@
{24000, 48000}, {AudioFormat::PCM_16_BIT});
}
- static const vector<AudioConfig> getSupportedPlaybackAudioConfig() {
- // TODO: retrieve audio config supported by the platform
- // as declared in the policy configuration
- return {};
- }
-
static const vector<AudioConfig> getRequiredSupportCaptureAudioConfig() {
if (!primaryHasMic()) return {};
return combineAudioConfig({AudioChannelMask::IN_MONO}, {8000, 11025, 16000, 44100},
@@ -73,11 +67,6 @@
return combineAudioConfig({AudioChannelMask::IN_STEREO}, {22050, 48000},
{AudioFormat::PCM_16_BIT});
}
- static const vector<AudioConfig> getSupportedCaptureAudioConfig() {
- // TODO: retrieve audio config supported by the platform
- // as declared in the policy configuration
- return {};
- }
static vector<AudioConfig> combineAudioConfig(vector<audio_channel_mask_t> channelMasks,
vector<uint32_t> sampleRates,
diff --git a/audio/core/all-versions/vts/functional/DeviceManager.h b/audio/core/all-versions/vts/functional/DeviceManager.h
index b6e2db0..d849f85 100644
--- a/audio/core/all-versions/vts/functional/DeviceManager.h
+++ b/audio/core/all-versions/vts/functional/DeviceManager.h
@@ -22,25 +22,33 @@
template <class Derived, class Key, class Interface>
class InterfaceManager {
public:
+ sp<Interface> getExisting(const Key& name) {
+ auto existing = instances.find(name);
+ return existing != instances.end() ? existing->second : sp<Interface>();
+ }
+
sp<Interface> get(const Key& name) {
auto existing = instances.find(name);
if (existing != instances.end()) return existing->second;
auto [inserted, _] = instances.emplace(name, Derived::createInterfaceInstance(name));
if (inserted->second) {
- environment->registerTearDown([name]() { (void)Derived::getInstance().reset(name); });
+ environment->registerTearDown(
+ [name]() { (void)Derived::getInstance().reset(name, false); });
}
return inserted->second;
}
// The test must check that reset was successful. Reset failure means that the test code
// is holding a strong reference to the device.
- bool reset(const Key& name) __attribute__((warn_unused_result)) {
+ bool reset(const Key& name, bool waitForDestruction) __attribute__((warn_unused_result)) {
auto iter = instances.find(name);
if (iter == instances.end()) return true;
::android::wp<Interface> weak = iter->second;
instances.erase(iter);
if (weak.promote() != nullptr) return false;
- waitForInstanceDestruction();
+ if (waitForDestruction) {
+ waitForInstanceDestruction();
+ }
return true;
}
@@ -100,7 +108,15 @@
}
bool reset(const std::string& factoryName, const std::string& name)
__attribute__((warn_unused_result)) {
- return InterfaceManager::reset(std::make_tuple(factoryName, name));
+#if MAJOR_VERSION <= 5
+ return InterfaceManager::reset(std::make_tuple(factoryName, name), true);
+#elif MAJOR_VERSION >= 6
+ {
+ sp<IDevice> device = getExisting(std::make_tuple(factoryName, name));
+ if (device != nullptr) device->close();
+ }
+ return InterfaceManager::reset(std::make_tuple(factoryName, name), false);
+#endif
}
bool resetPrimary(const std::string& factoryName) __attribute__((warn_unused_result)) {
return reset(factoryName, kPrimaryDevice);
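
The reset() contract above ("reset failure means the test code is holding a strong reference") relies on weak-pointer promotion. A standalone sketch of that liveness check using the libutils types:

    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    // After the last sp<> is dropped, wp<>::promote() returns nullptr; this is
    // how InterfaceManager::reset() proves the instance can really be destroyed.
    struct Obj : public android::RefBase {};

    bool leaked() {
        android::sp<Obj> strong = new Obj();
        android::wp<Obj> weak = strong;
        strong.clear();                    // drop the last strong reference
        return weak.promote() != nullptr;  // false here: nothing still holds Obj
    }
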
diff --git a/audio/effect/6.0/IEffect.hal b/audio/effect/6.0/IEffect.hal
index b35afee..f4c50a2 100644
--- a/audio/effect/6.0/IEffect.hal
+++ b/audio/effect/6.0/IEffect.hal
@@ -407,9 +407,12 @@
/**
* Called by the framework to deinitialize the effect and free up
- * all the currently allocated resources. It is recommended to close
+ * all currently allocated resources. It is recommended to close
 * the effect on the client side as soon as it becomes unused.
*
+ * The client must ensure that this function is not called while
+ * audio data is being transferred through the effect's message queues.
+ *
 * @return retval OK in case of success.
* INVALID_STATE if the effect was already closed.
*/
diff --git a/audio/effect/all-versions/default/Effect.cpp b/audio/effect/all-versions/default/Effect.cpp
index 3c0d878..0afa779 100644
--- a/audio/effect/all-versions/default/Effect.cpp
+++ b/audio/effect/all-versions/default/Effect.cpp
@@ -138,11 +138,11 @@
const char* Effect::sContextCallFunction = sContextCallToCommand;
Effect::Effect(effect_handle_t handle)
- : mIsClosed(false), mHandle(handle), mEfGroup(nullptr), mStopProcessThread(false) {}
+ : mHandle(handle), mEfGroup(nullptr), mStopProcessThread(false) {}
Effect::~Effect() {
ATRACE_CALL();
- close();
+ (void)close();
if (mProcessThread.get()) {
ATRACE_NAME("mProcessThread->join");
status_t status = mProcessThread->join();
@@ -154,8 +154,10 @@
}
mInBuffer.clear();
mOutBuffer.clear();
+#if MAJOR_VERSION <= 5
int status = EffectRelease(mHandle);
ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
+#endif
EffectMap::getInstance().remove(mHandle);
mHandle = 0;
}
@@ -699,15 +701,20 @@
}
Return<Result> Effect::close() {
- if (mIsClosed) return Result::INVALID_STATE;
- mIsClosed = true;
- if (mProcessThread.get()) {
- mStopProcessThread.store(true, std::memory_order_release);
+ if (mStopProcessThread.load(std::memory_order_relaxed)) { // only this thread modifies
+ return Result::INVALID_STATE;
}
+ mStopProcessThread.store(true, std::memory_order_release);
if (mEfGroup) {
mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_QUIT));
}
+#if MAJOR_VERSION <= 5
return Result::OK;
+#elif MAJOR_VERSION >= 6
+ // No need to join the processing thread; it is part of the API contract
+ // that the client must finish processing before closing the effect.
+ return analyzeStatus("EffectRelease", "", sContextCallFunction, EffectRelease(mHandle));
+#endif
}
Return<void> Effect::debug(const hidl_handle& fd, const hidl_vec<hidl_string>& /* options */) {
diff --git a/audio/effect/all-versions/default/Effect.h b/audio/effect/all-versions/default/Effect.h
index 3d99a0e..181e542 100644
--- a/audio/effect/all-versions/default/Effect.h
+++ b/audio/effect/all-versions/default/Effect.h
@@ -170,7 +170,6 @@
static const char* sContextCallToCommand;
static const char* sContextCallFunction;
- bool mIsClosed;
effect_handle_t mHandle;
sp<AudioBufferWrapper> mInBuffer;
sp<AudioBufferWrapper> mOutBuffer;
diff --git a/biometrics/face/1.0/vts/functional/Android.bp b/biometrics/face/1.0/vts/functional/Android.bp
index fa68c4e..f2598a7 100644
--- a/biometrics/face/1.0/vts/functional/Android.bp
+++ b/biometrics/face/1.0/vts/functional/Android.bp
@@ -19,6 +19,6 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalBiometricsFaceV1_0TargetTest.cpp"],
static_libs: ["android.hardware.biometrics.face@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp b/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
index a4e95ed..7ac44a4 100644
--- a/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
+++ b/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
@@ -20,9 +20,10 @@
#include <android/hardware/biometrics/face/1.0/IBiometricsFaceClientCallback.h>
#include <VtsHalHidlTargetCallbackBase.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <chrono>
#include <cstdint>
@@ -124,27 +125,11 @@
}
};
-// Test environment for the BiometricsFace HAL.
-class FaceHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // Get the test environment singleton.
- static FaceHidlEnvironment* Instance() {
- static FaceHidlEnvironment* instance = new FaceHidlEnvironment;
- return instance;
- }
-
- void registerTestServices() override { registerTestService<IBiometricsFace>(); }
-
- private:
- FaceHidlEnvironment() = default;
-};
-
// Test class for the BiometricsFace HAL.
-class FaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class FaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
void SetUp() override {
- mService = ::testing::VtsHalHidlTargetTestBase::getService<IBiometricsFace>(
- FaceHidlEnvironment::Instance()->getServiceName<IBiometricsFace>());
+ mService = IBiometricsFace::getService(GetParam());
ASSERT_NE(mService, nullptr);
mCallback = new FaceCallback();
mCallback->SetWaitTimeoutDefault(kTimeout);
@@ -167,7 +152,7 @@
// generateChallenge should always return a unique, cryptographically secure,
// non-zero number.
-TEST_F(FaceHidlTest, GenerateChallengeTest) {
+TEST_P(FaceHidlTest, GenerateChallengeTest) {
std::map<uint64_t, int> m;
for (int i = 0; i < kGenerateChallengeIterations; ++i) {
Return<void> ret =
@@ -182,7 +167,7 @@
}
// enroll with an invalid (all zeroes) HAT should fail.
-TEST_F(FaceHidlTest, EnrollZeroHatTest) {
+TEST_P(FaceHidlTest, EnrollZeroHatTest) {
// Filling HAT with zeros
hidl_vec<uint8_t> token(69);
for (size_t i = 0; i < 69; i++) {
@@ -200,7 +185,7 @@
}
// enroll with an invalid HAT should fail.
-TEST_F(FaceHidlTest, EnrollGarbageHatTest) {
+TEST_P(FaceHidlTest, EnrollGarbageHatTest) {
// Filling HAT with pseudorandom invalid data.
// Using default seed to make the test reproducible.
std::mt19937 gen(std::mt19937::default_seed);
@@ -221,7 +206,7 @@
}
// setFeature with an invalid (all zeros) HAT should fail.
-TEST_F(FaceHidlTest, SetFeatureZeroHatTest) {
+TEST_P(FaceHidlTest, SetFeatureZeroHatTest) {
hidl_vec<uint8_t> token(69);
for (size_t i = 0; i < 69; i++) {
token[i] = 0;
@@ -232,7 +217,7 @@
}
// setFeature with an invalid HAT should fail.
-TEST_F(FaceHidlTest, SetFeatureGarbageHatTest) {
+TEST_P(FaceHidlTest, SetFeatureGarbageHatTest) {
// Filling HAT with pseudorandom invalid data.
// Using default seed to make the test reproducible.
std::mt19937 gen(std::mt19937::default_seed);
@@ -254,16 +239,16 @@
ASSERT_TRUE(res.isOk());
}
-TEST_F(FaceHidlTest, GetFeatureRequireAttentionTest) {
+TEST_P(FaceHidlTest, GetFeatureRequireAttentionTest) {
assertGetFeatureFails(mService, 0 /* faceId */, Feature::REQUIRE_ATTENTION);
}
-TEST_F(FaceHidlTest, GetFeatureRequireDiversityTest) {
+TEST_P(FaceHidlTest, GetFeatureRequireDiversityTest) {
assertGetFeatureFails(mService, 0 /* faceId */, Feature::REQUIRE_DIVERSITY);
}
// revokeChallenge should always return within the timeout
-TEST_F(FaceHidlTest, RevokeChallengeTest) {
+TEST_P(FaceHidlTest, RevokeChallengeTest) {
auto start = std::chrono::system_clock::now();
Return<Status> ret = mService->revokeChallenge();
auto elapsed = std::chrono::system_clock::now() - start;
@@ -272,14 +257,14 @@
}
// The call to getAuthenticatorId should succeed.
-TEST_F(FaceHidlTest, GetAuthenticatorIdTest) {
+TEST_P(FaceHidlTest, GetAuthenticatorIdTest) {
Return<void> ret = mService->getAuthenticatorId(
[](const OptionalUint64& res) { ASSERT_EQ(Status::OK, res.status); });
ASSERT_TRUE(ret.isOk());
}
// The call to enumerate should succeed.
-TEST_F(FaceHidlTest, EnumerateTest) {
+TEST_P(FaceHidlTest, EnumerateTest) {
Return<Status> ret = mService->enumerate();
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
auto res = mCallback->WaitForCallback(kCallbackNameOnEnumerate);
@@ -288,21 +273,21 @@
}
// The call to remove should succeed for any faceId
-TEST_F(FaceHidlTest, RemoveFaceTest) {
+TEST_P(FaceHidlTest, RemoveFaceTest) {
// Remove a face
Return<Status> ret = mService->remove(kFaceId);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
}
// Remove should accept 0 to delete all faces
-TEST_F(FaceHidlTest, RemoveAllFacesTest) {
+TEST_P(FaceHidlTest, RemoveAllFacesTest) {
// Remove all faces
Return<Status> ret = mService->remove(0);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
}
// Active user should successfully set to a writable location.
-TEST_F(FaceHidlTest, SetActiveUserTest) {
+TEST_P(FaceHidlTest, SetActiveUserTest) {
// Create an active user
Return<Status> ret = mService->setActiveUser(2, kFacedataDir);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -313,7 +298,7 @@
}
// Active user should fail to set to an unwritable location.
-TEST_F(FaceHidlTest, SetActiveUserUnwritableTest) {
+TEST_P(FaceHidlTest, SetActiveUserUnwritableTest) {
// Create an active user to an unwritable location (device root dir)
Return<Status> ret = mService->setActiveUser(3, "/");
ASSERT_NE(Status::OK, static_cast<Status>(ret));
@@ -324,7 +309,7 @@
}
// Active user should fail to set to a null location.
-TEST_F(FaceHidlTest, SetActiveUserNullTest) {
+TEST_P(FaceHidlTest, SetActiveUserNullTest) {
// Create an active user to a null location.
Return<Status> ret = mService->setActiveUser(4, nullptr);
ASSERT_NE(Status::OK, static_cast<Status>(ret));
@@ -336,7 +321,7 @@
// Cancel should always return CANCELED from any starting state including
// the IDLE state.
-TEST_F(FaceHidlTest, CancelTest) {
+TEST_P(FaceHidlTest, CancelTest) {
Return<Status> ret = mService->cancel();
// check that we were able to make an IPC request successfully
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -347,7 +332,7 @@
EXPECT_EQ(FaceError::CANCELED, res.args->error);
}
-TEST_F(FaceHidlTest, OnLockoutChangedTest) {
+TEST_P(FaceHidlTest, OnLockoutChangedTest) {
// Update active user and ensure onLockoutChanged was called.
Return<Status> ret = mService->setActiveUser(kUserId + 1, kFacedataDir);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -359,11 +344,7 @@
} // anonymous namespace
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(FaceHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- FaceHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, FaceHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IBiometricsFace::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/bluetooth/1.0/default/bluetooth_hci.cc b/bluetooth/1.0/default/bluetooth_hci.cc
index e14e3d7..a2211f4 100644
--- a/bluetooth/1.0/default/bluetooth_hci.cc
+++ b/bluetooth/1.0/default/bluetooth_hci.cc
@@ -89,6 +89,9 @@
if (!hidl_status.isOk()) {
ALOGE("VendorInterface -> Unable to call scoDataReceived()");
}
+ },
+ [cb](const hidl_vec<uint8_t>&) {
+ ALOGE("VendorInterface -> No callback for ISO packets in HAL V1_0");
});
if (!rc) {
auto hidl_status = cb->initializationComplete(Status::INITIALIZATION_ERROR);
diff --git a/bluetooth/1.0/default/h4_protocol.cc b/bluetooth/1.0/default/h4_protocol.cc
index 98e3273..8c24f76 100644
--- a/bluetooth/1.0/default/h4_protocol.cc
+++ b/bluetooth/1.0/default/h4_protocol.cc
@@ -58,6 +58,9 @@
case HCI_PACKET_TYPE_SCO_DATA:
sco_cb_(hci_packetizer_.GetPacket());
break;
+ case HCI_PACKET_TYPE_ISO_DATA:
+ iso_cb_(hci_packetizer_.GetPacket());
+ break;
default:
LOG_ALWAYS_FATAL("%s: Unimplemented packet type %d", __func__,
static_cast<int>(hci_packet_type_));
diff --git a/bluetooth/1.0/default/h4_protocol.h b/bluetooth/1.0/default/h4_protocol.h
index 0d0a1ca..0c82fd6 100644
--- a/bluetooth/1.0/default/h4_protocol.h
+++ b/bluetooth/1.0/default/h4_protocol.h
@@ -31,11 +31,12 @@
class H4Protocol : public HciProtocol {
public:
H4Protocol(int fd, PacketReadCallback event_cb, PacketReadCallback acl_cb,
- PacketReadCallback sco_cb)
+ PacketReadCallback sco_cb, PacketReadCallback iso_cb)
: uart_fd_(fd),
event_cb_(event_cb),
acl_cb_(acl_cb),
sco_cb_(sco_cb),
+ iso_cb_(iso_cb),
hci_packetizer_([this]() { OnPacketReady(); }) {}
size_t Send(uint8_t type, const uint8_t* data, size_t length);
@@ -50,6 +51,7 @@
PacketReadCallback event_cb_;
PacketReadCallback acl_cb_;
PacketReadCallback sco_cb_;
+ PacketReadCallback iso_cb_;
HciPacketType hci_packet_type_{HCI_PACKET_TYPE_UNKNOWN};
hci::HciPacketizer hci_packetizer_;
diff --git a/bluetooth/1.0/default/hci_internals.h b/bluetooth/1.0/default/hci_internals.h
index 1e1f300..24e944f 100644
--- a/bluetooth/1.0/default/hci_internals.h
+++ b/bluetooth/1.0/default/hci_internals.h
@@ -24,7 +24,8 @@
HCI_PACKET_TYPE_COMMAND = 1,
HCI_PACKET_TYPE_ACL_DATA = 2,
HCI_PACKET_TYPE_SCO_DATA = 3,
- HCI_PACKET_TYPE_EVENT = 4
+ HCI_PACKET_TYPE_EVENT = 4,
+ HCI_PACKET_TYPE_ISO_DATA = 5,
};
// 2 bytes for opcode, 1 byte for parameter length (Volume 2, Part E, 5.4.1)
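
For reference, this is how the new type value appears on the wire under H4 framing. The bytes below mirror the preamble that h4_protocol_unittest.cc (further down) constructs; the layout comments are a hedged reading of that fixture, not of the specification:

    // H4-framed inbound ISO data packet as built by the unit test:
    //   [0]    H4 packet indicator: 5 = HCI_PACKET_TYPE_ISO_DATA
    //   [1..2] connection handle plus flag bits (0x14, 0x11 in the fixture)
    //   [3]    payload length, filled in by the fixture
    uint8_t frame[] = {0x05, 0x14, 0x11, 0x03, 'i', 's', 'o'};
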
diff --git a/bluetooth/1.0/default/test/h4_protocol_unittest.cc b/bluetooth/1.0/default/test/h4_protocol_unittest.cc
index ba56c0d..283243d 100644
--- a/bluetooth/1.0/default/test/h4_protocol_unittest.cc
+++ b/bluetooth/1.0/default/test/h4_protocol_unittest.cc
@@ -42,11 +42,15 @@
static char sample_data1[100] = "A point is that which has no part.";
static char sample_data2[100] = "A line is breadthless length.";
static char sample_data3[100] = "The ends of a line are points.";
+static char sample_data4[100] =
+ "A plane surface is a surface which lies evenly with the straight ...";
static char acl_data[100] =
"A straight line is a line which lies evenly with the points on itself.";
static char sco_data[100] =
"A surface is that which has length and breadth only.";
static char event_data[100] = "The edges of a surface are lines.";
+static char iso_data[100] =
+ "A plane angle is the inclination to one another of two lines in a ...";
MATCHER_P3(HidlVecMatches, preamble, preamble_length, payload, "") {
size_t length = strlen(payload) + preamble_length;
@@ -75,9 +79,9 @@
int sockfd[2];
socketpair(AF_LOCAL, SOCK_STREAM, 0, sockfd);
- H4Protocol* h4_hci =
- new H4Protocol(sockfd[0], event_cb_.AsStdFunction(),
- acl_cb_.AsStdFunction(), sco_cb_.AsStdFunction());
+ H4Protocol* h4_hci = new H4Protocol(
+ sockfd[0], event_cb_.AsStdFunction(), acl_cb_.AsStdFunction(),
+ sco_cb_.AsStdFunction(), iso_cb_.AsStdFunction());
fd_watcher_.WatchFdForNonBlockingReads(
sockfd[0], [h4_hci](int fd) { h4_hci->OnDataReady(fd); });
protocol_ = h4_hci;
@@ -184,9 +188,35 @@
}
}
+ void WriteAndExpectInboundIsoData(char* payload) {
+ // h4 type[1] + handle[2] + size[1]
+ char preamble[4] = {HCI_PACKET_TYPE_ISO_DATA, 20, 17, 0};
+ preamble[3] = strlen(payload) & 0xFF;
+
+ ALOGD("%s writing", __func__);
+ TEMP_FAILURE_RETRY(write(fake_uart_, preamble, sizeof(preamble)));
+ TEMP_FAILURE_RETRY(write(fake_uart_, payload, strlen(payload)));
+
+ ALOGD("%s waiting", __func__);
+ std::mutex mutex;
+ std::condition_variable done;
+ EXPECT_CALL(iso_cb_, Call(HidlVecMatches(preamble + 1, sizeof(preamble) - 1,
+ payload)))
+ .WillOnce(Notify(&mutex, &done));
+
+ // Fail if it takes longer than 100 ms.
+ auto timeout_time =
+ std::chrono::steady_clock::now() + std::chrono::milliseconds(100);
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ done.wait_until(lock, timeout_time);
+ }
+ }
+
testing::MockFunction<void(const hidl_vec<uint8_t>&)> event_cb_;
testing::MockFunction<void(const hidl_vec<uint8_t>&)> acl_cb_;
testing::MockFunction<void(const hidl_vec<uint8_t>&)> sco_cb_;
+ testing::MockFunction<void(const hidl_vec<uint8_t>&)> iso_cb_;
async::AsyncFdWatcher fd_watcher_;
H4Protocol* protocol_;
int fake_uart_;
@@ -197,6 +227,7 @@
SendAndReadUartOutbound(HCI_PACKET_TYPE_COMMAND, sample_data1);
SendAndReadUartOutbound(HCI_PACKET_TYPE_ACL_DATA, sample_data2);
SendAndReadUartOutbound(HCI_PACKET_TYPE_SCO_DATA, sample_data3);
+ SendAndReadUartOutbound(HCI_PACKET_TYPE_ISO_DATA, sample_data4);
}
// Ensure we properly parse data coming from the UART
@@ -204,6 +235,7 @@
WriteAndExpectInboundAclData(acl_data);
WriteAndExpectInboundScoData(sco_data);
WriteAndExpectInboundEvent(event_data);
+ WriteAndExpectInboundIsoData(iso_data);
}
} // namespace implementation
diff --git a/bluetooth/1.0/default/vendor_interface.cc b/bluetooth/1.0/default/vendor_interface.cc
index d56e344..d809313 100644
--- a/bluetooth/1.0/default/vendor_interface.cc
+++ b/bluetooth/1.0/default/vendor_interface.cc
@@ -162,14 +162,14 @@
bool VendorInterface::Initialize(
InitializeCompleteCallback initialize_complete_cb,
PacketReadCallback event_cb, PacketReadCallback acl_cb,
- PacketReadCallback sco_cb) {
+ PacketReadCallback sco_cb, PacketReadCallback iso_cb) {
if (g_vendor_interface) {
ALOGE("%s: No previous Shutdown()?", __func__);
return false;
}
g_vendor_interface = new VendorInterface();
return g_vendor_interface->Open(initialize_complete_cb, event_cb, acl_cb,
- sco_cb);
+ sco_cb, iso_cb);
}
void VendorInterface::Shutdown() {
@@ -185,7 +185,8 @@
bool VendorInterface::Open(InitializeCompleteCallback initialize_complete_cb,
PacketReadCallback event_cb,
PacketReadCallback acl_cb,
- PacketReadCallback sco_cb) {
+ PacketReadCallback sco_cb,
+ PacketReadCallback iso_cb) {
initialize_complete_cb_ = initialize_complete_cb;
// Initialize vendor interface
@@ -248,7 +249,7 @@
if (fd_count == 1) {
hci::H4Protocol* h4_hci =
- new hci::H4Protocol(fd_list[0], intercept_events, acl_cb, sco_cb);
+ new hci::H4Protocol(fd_list[0], intercept_events, acl_cb, sco_cb, iso_cb);
fd_watcher_.WatchFdForNonBlockingReads(
fd_list[0], [h4_hci](int fd) { h4_hci->OnDataReady(fd); });
hci_ = h4_hci;
diff --git a/bluetooth/1.0/default/vendor_interface.h b/bluetooth/1.0/default/vendor_interface.h
index 1d69040..040f31a 100644
--- a/bluetooth/1.0/default/vendor_interface.h
+++ b/bluetooth/1.0/default/vendor_interface.h
@@ -38,7 +38,7 @@
public:
static bool Initialize(InitializeCompleteCallback initialize_complete_cb,
PacketReadCallback event_cb, PacketReadCallback acl_cb,
- PacketReadCallback sco_cb);
+ PacketReadCallback sco_cb, PacketReadCallback iso_cb);
static void Shutdown();
static VendorInterface* get();
@@ -51,7 +51,7 @@
bool Open(InitializeCompleteCallback initialize_complete_cb,
PacketReadCallback event_cb, PacketReadCallback acl_cb,
- PacketReadCallback sco_cb);
+ PacketReadCallback sco_cb, PacketReadCallback iso_cb);
void Close();
void OnTimeout();
diff --git a/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp b/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp
index beb9a3e..ef02eff 100644
--- a/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp
+++ b/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp
@@ -24,8 +24,9 @@
#include <utils/Log.h>
#include <VtsHalHidlTargetCallbackBase.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <chrono>
#include <queue>
@@ -137,30 +138,12 @@
std::chrono::steady_clock::time_point start_time_;
};
-// Test environment for Bluetooth HIDL HAL.
-class BluetoothHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static BluetoothHidlEnvironment* Instance() {
- static BluetoothHidlEnvironment* instance = new BluetoothHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override {
- registerTestService<IBluetoothHci>();
- }
-
- private:
- BluetoothHidlEnvironment() {}
-};
-
// The main test class for Bluetooth HIDL HAL.
-class BluetoothHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class BluetoothHidlTest : public ::testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
// currently test passthrough mode only
- bluetooth =
- ::testing::VtsHalHidlTargetTestBase::getService<IBluetoothHci>();
+ bluetooth = IBluetoothHci::getService(GetParam());
ASSERT_NE(bluetooth, nullptr);
ALOGI("%s: getService() for bluetooth is %s", __func__,
bluetooth->isRemote() ? "remote" : "local");
@@ -617,10 +600,10 @@
}
// Empty test: Initialize()/Close() are called in SetUp()/TearDown().
-TEST_F(BluetoothHidlTest, InitializeAndClose) {}
+TEST_P(BluetoothHidlTest, InitializeAndClose) {}
// Send an HCI Reset with sendHciCommand and wait for a command complete event.
-TEST_F(BluetoothHidlTest, HciReset) {
+TEST_P(BluetoothHidlTest, HciReset) {
hidl_vec<uint8_t> cmd = COMMAND_HCI_RESET;
bluetooth->sendHciCommand(cmd);
@@ -628,7 +611,7 @@
}
// Read and check the HCI version of the controller.
-TEST_F(BluetoothHidlTest, HciVersionTest) {
+TEST_P(BluetoothHidlTest, HciVersionTest) {
hidl_vec<uint8_t> cmd = COMMAND_HCI_READ_LOCAL_VERSION_INFORMATION;
bluetooth->sendHciCommand(cmd);
@@ -649,7 +632,7 @@
}
// Send an unknown HCI command and wait for the error message.
-TEST_F(BluetoothHidlTest, HciUnknownCommand) {
+TEST_P(BluetoothHidlTest, HciUnknownCommand) {
hidl_vec<uint8_t> cmd = COMMAND_HCI_SHOULD_BE_UNKNOWN;
bluetooth->sendHciCommand(cmd);
@@ -676,14 +659,14 @@
}
// Enter loopback mode, but don't send any packets.
-TEST_F(BluetoothHidlTest, WriteLoopbackMode) {
+TEST_P(BluetoothHidlTest, WriteLoopbackMode) {
std::vector<uint16_t> sco_connection_handles;
std::vector<uint16_t> acl_connection_handles;
enterLoopbackMode(sco_connection_handles, acl_connection_handles);
}
// Enter loopback mode and send single packets.
-TEST_F(BluetoothHidlTest, LoopbackModeSinglePackets) {
+TEST_P(BluetoothHidlTest, LoopbackModeSinglePackets) {
setBufferSizes();
std::vector<uint16_t> sco_connection_handles;
@@ -720,7 +703,7 @@
}
// Enter loopback mode and send packets for bandwidth measurements.
-TEST_F(BluetoothHidlTest, LoopbackModeBandwidth) {
+TEST_P(BluetoothHidlTest, LoopbackModeBandwidth) {
setBufferSizes();
std::vector<uint16_t> sco_connection_handles;
@@ -758,11 +741,8 @@
}
}
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(BluetoothHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- BluetoothHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, BluetoothHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IBluetoothHci::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/bluetooth/1.1/Android.bp b/bluetooth/1.1/Android.bp
new file mode 100644
index 0000000..4204aed
--- /dev/null
+++ b/bluetooth/1.1/Android.bp
@@ -0,0 +1,18 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.bluetooth@1.1",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "IBluetoothHci.hal",
+ "IBluetoothHciCallbacks.hal",
+ ],
+ interfaces: [
+ "android.hardware.bluetooth@1.0",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/bluetooth/1.1/IBluetoothHci.hal b/bluetooth/1.1/IBluetoothHci.hal
new file mode 100644
index 0000000..0f69c6e
--- /dev/null
+++ b/bluetooth/1.1/IBluetoothHci.hal
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.bluetooth@1.1;
+
+import @1.0::HciPacket;
+import @1.0::IBluetoothHci;
+import IBluetoothHciCallbacks;
+
+/**
+ * The Host Controller Interface (HCI) is the layer defined by the Bluetooth
+ * specification between the software that runs on the host and the Bluetooth
+ * controller chip. This boundary is the natural choice for a Hardware
+ * Abstraction Layer (HAL). Dealing only in HCI packets and events simplifies
+ * the stack and abstracts away power management, initialization, and other
+ * implementation-specific details related to the hardware.
+ */
+interface IBluetoothHci extends @1.0::IBluetoothHci {
+ /**
+ * Same as @1.0, but uses the 1.1 callbacks version.
+ */
+ initialize_1_1(@1.1::IBluetoothHciCallbacks callback);
+
+ /**
+ * Send an ISO data packet (as specified in the Bluetooth Core
+ * Specification v5.2) to the Bluetooth controller.
+ * Packets must be processed in order.
+ * @param data HCI data packet to be sent
+ */
+ sendIsoData(HciPacket data);
+};
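To make the call flow concrete, here is a minimal client-side sketch of the interface above. It assumes only the HIDL-generated proxy (getService(), initialize_1_1(), sendIsoData(), close()) and an IBluetoothHciCallbacks implementation supplied by the caller; the packet bytes are placeholders, not a valid ISO payload:

    #include <android/hardware/bluetooth/1.1/IBluetoothHci.h>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    using ::android::hardware::bluetooth::V1_1::IBluetoothHci;
    using ::android::hardware::bluetooth::V1_1::IBluetoothHciCallbacks;

    // `callbacks` is any IBluetoothHciCallbacks implementation (see next file).
    void sendOneIsoPacket(const sp<IBluetoothHciCallbacks>& callbacks) {
      sp<IBluetoothHci> hci = IBluetoothHci::getService();
      hci->initialize_1_1(callbacks);  // completion arrives asynchronously
                                       // via initializationComplete()
      // Placeholder handle/length/payload bytes:
      hidl_vec<uint8_t> iso_packet = {0x01, 0x00, 0x04, 0x00,
                                      0xde, 0xad, 0xbe, 0xef};
      hci->sendIsoData(iso_packet);    // packets must stay in order
      hci->close();
    }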
diff --git a/bluetooth/1.1/IBluetoothHciCallbacks.hal b/bluetooth/1.1/IBluetoothHciCallbacks.hal
new file mode 100644
index 0000000..62cbe45
--- /dev/null
+++ b/bluetooth/1.1/IBluetoothHciCallbacks.hal
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.bluetooth@1.1;
+
+import @1.0::HciPacket;
+import @1.0::IBluetoothHciCallbacks;
+
+/**
+ * The interface from the Bluetooth Controller to the stack.
+ */
+interface IBluetoothHciCallbacks extends @1.0::IBluetoothHciCallbacks {
+ /**
+ * Send an ISO data packet from the controller to the host.
+ * @param data the ISO HCI packet to be passed to the host stack
+ */
+ isoDataReceived(HciPacket data);
+};
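A minimal host-side implementation only has to override the four 1.0 callbacks plus the new isoDataReceived(); the VTS test added below carries a complete version, so this hypothetical sketch just logs what it receives:

    #define LOG_TAG "LoggingCallbacks"
    #include <android/hardware/bluetooth/1.1/IBluetoothHciCallbacks.h>
    #include <log/log.h>

    using ::android::hardware::hidl_vec;
    using ::android::hardware::Return;
    using ::android::hardware::Void;
    namespace V1_0 = ::android::hardware::bluetooth::V1_0;
    namespace V1_1 = ::android::hardware::bluetooth::V1_1;

    class LoggingCallbacks : public V1_1::IBluetoothHciCallbacks {
     public:
      Return<void> initializationComplete(V1_0::Status status) override {
        ALOGI("init complete: %d", static_cast<int>(status));
        return Void();
      }
      Return<void> hciEventReceived(const hidl_vec<uint8_t>& v) override {
        ALOGI("event, %zu bytes", v.size());
        return Void();
      }
      Return<void> aclDataReceived(const hidl_vec<uint8_t>& v) override {
        ALOGI("ACL, %zu bytes", v.size());
        return Void();
      }
      Return<void> scoDataReceived(const hidl_vec<uint8_t>& v) override {
        ALOGI("SCO, %zu bytes", v.size());
        return Void();
      }
      Return<void> isoDataReceived(const hidl_vec<uint8_t>& v) override {
        ALOGI("ISO, %zu bytes", v.size());  // new in @1.1
        return Void();
      }
    };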
diff --git a/bluetooth/1.1/default/Android.bp b/bluetooth/1.1/default/Android.bp
new file mode 100644
index 0000000..4f7fecb
--- /dev/null
+++ b/bluetooth/1.1/default/Android.bp
@@ -0,0 +1,42 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_binary {
+ name: "android.hardware.bluetooth@1.1-service",
+ defaults: ["hidl_defaults"],
+ relative_install_path: "hw",
+ vendor: true,
+ init_rc: ["android.hardware.bluetooth@1.1-service.rc"],
+ srcs: [
+ "service.cpp",
+ "bluetooth_hci.cc",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libcutils",
+ "libdl",
+ "libbase",
+ "libutils",
+ "libhardware",
+ "libhidlbase",
+ "android.hardware.bluetooth@1.0-impl",
+ "android.hardware.bluetooth@1.1",
+ ],
+
+ static_libs: [
+ "android.hardware.bluetooth-hci",
+ ],
+}
diff --git a/bluetooth/1.1/default/OWNERS b/bluetooth/1.1/default/OWNERS
new file mode 100644
index 0000000..0c01df6
--- /dev/null
+++ b/bluetooth/1.1/default/OWNERS
@@ -0,0 +1,3 @@
+zachoverflow@google.com
+mylesgw@google.com
+jpawlowski@google.com
diff --git a/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc b/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc
new file mode 100644
index 0000000..49f0be3
--- /dev/null
+++ b/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc
@@ -0,0 +1,9 @@
+service vendor.bluetooth-1-1 /vendor/bin/hw/android.hardware.bluetooth@1.1-service
+ interface android.hardware.bluetooth@1.1::IBluetoothHci default
+ interface android.hardware.bluetooth@1.0::IBluetoothHci default
+ class hal
+ capabilities BLOCK_SUSPEND NET_ADMIN SYS_NICE
+ user bluetooth
+ group bluetooth
+ writepid /dev/stune/foreground/tasks
+
diff --git a/bluetooth/1.1/default/bluetooth_hci.cc b/bluetooth/1.1/default/bluetooth_hci.cc
new file mode 100644
index 0000000..7ccce80
--- /dev/null
+++ b/bluetooth/1.1/default/bluetooth_hci.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.bluetooth@1.1-impl"
+#include "bluetooth_hci.h"
+
+#include <log/log.h>
+
+#include "vendor_interface.h"
+
+using android::hardware::bluetooth::V1_0::implementation::VendorInterface;
+
+namespace android {
+namespace hardware {
+namespace bluetooth {
+namespace V1_1 {
+namespace implementation {
+
+static const uint8_t HCI_DATA_TYPE_COMMAND = 1;
+static const uint8_t HCI_DATA_TYPE_ACL = 2;
+static const uint8_t HCI_DATA_TYPE_SCO = 3;
+static const uint8_t HCI_DATA_TYPE_ISO = 5;
+
+class BluetoothDeathRecipient : public hidl_death_recipient {
+ public:
+ BluetoothDeathRecipient(const sp<IBluetoothHci> hci) : mHci(hci) {}
+
+ virtual void serviceDied(
+ uint64_t /*cookie*/,
+ const wp<::android::hidl::base::V1_0::IBase>& /*who*/) {
+ ALOGE("BluetoothDeathRecipient::serviceDied - Bluetooth service died");
+ has_died_ = true;
+ mHci->close();
+ }
+ sp<IBluetoothHci> mHci;
+ bool getHasDied() const { return has_died_; }
+ void setHasDied(bool has_died) { has_died_ = has_died; }
+
+ private:
+ bool has_died_ = false;
+};
+
+BluetoothHci::BluetoothHci()
+ : death_recipient_(new BluetoothDeathRecipient(this)) {}
+
+Return<void> BluetoothHci::initialize_1_1(
+ const ::android::sp<V1_1::IBluetoothHciCallbacks>& cb) {
+ ALOGI("BluetoothHci::initialize_1_1()");
+ if (cb == nullptr) {
+ ALOGE("cb == nullptr! -> Unable to call initializationComplete(ERR)");
+ return Void();
+ }
+
+ death_recipient_->setHasDied(false);
+ cb->linkToDeath(death_recipient_, 0);
+
+ bool rc = VendorInterface::Initialize(
+ [cb](bool status) {
+ auto hidl_status = cb->initializationComplete(
+ status ? V1_0::Status::SUCCESS
+ : V1_0::Status::INITIALIZATION_ERROR);
+ if (!hidl_status.isOk()) {
+ ALOGE("VendorInterface -> Unable to call initializationComplete()");
+ }
+ },
+ [cb](const hidl_vec<uint8_t>& packet) {
+ auto hidl_status = cb->hciEventReceived(packet);
+ if (!hidl_status.isOk()) {
+ ALOGE("VendorInterface -> Unable to call hciEventReceived()");
+ }
+ },
+ [cb](const hidl_vec<uint8_t>& packet) {
+ auto hidl_status = cb->aclDataReceived(packet);
+ if (!hidl_status.isOk()) {
+ ALOGE("VendorInterface -> Unable to call aclDataReceived()");
+ }
+ },
+ [cb](const hidl_vec<uint8_t>& packet) {
+ auto hidl_status = cb->scoDataReceived(packet);
+ if (!hidl_status.isOk()) {
+ ALOGE("VendorInterface -> Unable to call scoDataReceived()");
+ }
+ },
+ [cb](const hidl_vec<uint8_t>& packet) {
+ auto hidl_status = cb->isoDataReceived(packet);
+ if (!hidl_status.isOk()) {
+ ALOGE("VendorInterface -> Unable to call isoDataReceived()");
+ }
+ });
+ if (!rc) {
+ auto hidl_status =
+ cb->initializationComplete(V1_0::Status::INITIALIZATION_ERROR);
+ if (!hidl_status.isOk()) {
+ ALOGE("VendorInterface -> Unable to call initializationComplete(ERR)");
+ }
+ }
+
+ unlink_cb_ = [cb](sp<BluetoothDeathRecipient>& death_recipient) {
+ if (death_recipient->getHasDied())
+ ALOGI("Skipping unlink call, service died.");
+ else
+ cb->unlinkToDeath(death_recipient);
+ };
+
+ return Void();
+}
+
+class OldCbWrapper : public V1_1::IBluetoothHciCallbacks {
+ public:
+ const ::android::sp<V1_0::IBluetoothHciCallbacks> old_cb_;
+ OldCbWrapper(const ::android::sp<V1_0::IBluetoothHciCallbacks>& old_cb)
+ : old_cb_(old_cb) {}
+
+ virtual ~OldCbWrapper() = default;
+
+ Return<void> initializationComplete(V1_0::Status status) override {
+ return old_cb_->initializationComplete(status);
+ };
+
+ Return<void> hciEventReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& event) override {
+ return old_cb_->hciEventReceived(event);
+ };
+
+ Return<void> aclDataReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& data) override {
+ return old_cb_->aclDataReceived(data);
+ };
+
+ Return<void> scoDataReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& data) override {
+ return old_cb_->scoDataReceived(data);
+ };
+
+ Return<void> isoDataReceived(
+ const ::android::hardware::hidl_vec<uint8_t>&) override {
+ ALOGE("Please use HAL V1_1 for ISO.");
+ return Void();
+ };
+};
+
+Return<void> BluetoothHci::initialize(
+ const ::android::sp<V1_0::IBluetoothHciCallbacks>& cb) {
+ ALOGE("Using initialize from HAL V1_0 instead of initialize_1_1.");
+ return initialize_1_1(new OldCbWrapper(cb));
+}
+
+Return<void> BluetoothHci::close() {
+ ALOGI("BluetoothHci::close()");
+ if (unlink_cb_) unlink_cb_(death_recipient_); // no-op if never initialized
+ VendorInterface::Shutdown();
+ return Void();
+}
+
+Return<void> BluetoothHci::sendHciCommand(const hidl_vec<uint8_t>& command) {
+ sendDataToController(HCI_DATA_TYPE_COMMAND, command);
+ return Void();
+}
+
+Return<void> BluetoothHci::sendAclData(const hidl_vec<uint8_t>& data) {
+ sendDataToController(HCI_DATA_TYPE_ACL, data);
+ return Void();
+}
+
+Return<void> BluetoothHci::sendScoData(const hidl_vec<uint8_t>& data) {
+ sendDataToController(HCI_DATA_TYPE_SCO, data);
+ return Void();
+}
+
+Return<void> BluetoothHci::sendIsoData(const hidl_vec<uint8_t>& data) {
+ sendDataToController(HCI_DATA_TYPE_ISO, data);
+ return Void();
+}
+
+void BluetoothHci::sendDataToController(const uint8_t type,
+ const hidl_vec<uint8_t>& data) {
+ VendorInterface::get()->Send(type, data.data(), data.size());
+}
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace bluetooth
+} // namespace hardware
+} // namespace android
diff --git a/bluetooth/1.1/default/bluetooth_hci.h b/bluetooth/1.1/default/bluetooth_hci.h
new file mode 100644
index 0000000..5f59cb0
--- /dev/null
+++ b/bluetooth/1.1/default/bluetooth_hci.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HIDL_GENERATED_android_hardware_bluetooth_V1_1_BluetoothHci_H_
+#define HIDL_GENERATED_android_hardware_bluetooth_V1_1_BluetoothHci_H_
+
+#include <android/hardware/bluetooth/1.1/IBluetoothHci.h>
+#include <android/hardware/bluetooth/1.1/IBluetoothHciCallbacks.h>
+
+#include <hidl/MQDescriptor.h>
+
+#include <functional>
+
+namespace android {
+namespace hardware {
+namespace bluetooth {
+namespace V1_1 {
+namespace implementation {
+
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+
+class BluetoothDeathRecipient;
+
+class BluetoothHci : public V1_1::IBluetoothHci {
+ public:
+ BluetoothHci();
+ Return<void> initialize(
+ const ::android::sp<V1_0::IBluetoothHciCallbacks>& cb) override;
+ Return<void> initialize_1_1(
+ const ::android::sp<V1_1::IBluetoothHciCallbacks>& cb) override;
+ Return<void> sendHciCommand(const hidl_vec<uint8_t>& packet) override;
+ Return<void> sendAclData(const hidl_vec<uint8_t>& data) override;
+ Return<void> sendScoData(const hidl_vec<uint8_t>& data) override;
+ Return<void> sendIsoData(const hidl_vec<uint8_t>& data) override;
+ Return<void> close() override;
+
+ private:
+ void sendDataToController(const uint8_t type, const hidl_vec<uint8_t>& data);
+ ::android::sp<BluetoothDeathRecipient> death_recipient_;
+ std::function<void(sp<BluetoothDeathRecipient>&)> unlink_cb_;
+};
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace bluetooth
+} // namespace hardware
+} // namespace android
+
+#endif // HIDL_GENERATED_android_hardware_bluetooth_V1_1_BluetoothHci_H_
diff --git a/bluetooth/1.1/default/service.cpp b/bluetooth/1.1/default/service.cpp
new file mode 100644
index 0000000..affa855
--- /dev/null
+++ b/bluetooth/1.1/default/service.cpp
@@ -0,0 +1,43 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#define LOG_TAG "android.hardware.bluetooth@1.1-service"
+
+#include <android/hardware/bluetooth/1.1/IBluetoothHci.h>
+#include <hidl/HidlTransportSupport.h>
+#include <log/log.h>
+
+#include "bluetooth_hci.h"
+
+// Generated HIDL files
+using android::hardware::bluetooth::V1_1::IBluetoothHci;
+using android::hardware::bluetooth::V1_1::implementation::BluetoothHci;
+
+using android::sp;
+using android::status_t;
+
+int main() {
+ ::android::hardware::configureRpcThreadpool(1 /*threads*/, true /*willJoin*/);
+
+ sp<IBluetoothHci> bluetoothHci = new BluetoothHci();
+ const status_t status = bluetoothHci->registerAsService();
+ if (status != ::android::OK) {
+ ALOGE("Cannot register Bluetooth HAL service");
+ return 1; // registration failed; nothing to serve
+ }
+
+ ::android::hardware::joinRpcThreadpool();
+ return 1; // joinRpcThreadpool should never return
+}
diff --git a/bluetooth/1.1/vts/OWNERS b/bluetooth/1.1/vts/OWNERS
new file mode 100644
index 0000000..ff6fd93
--- /dev/null
+++ b/bluetooth/1.1/vts/OWNERS
@@ -0,0 +1,6 @@
+zachoverflow@google.com
+siyuanh@google.com
+mylesgw@google.com
+jpawlowski@google.com
+hsz@google.com
+
diff --git a/vibrator/1.4/vts/functional/Android.bp b/bluetooth/1.1/vts/functional/Android.bp
similarity index 65%
copy from vibrator/1.4/vts/functional/Android.bp
copy to bluetooth/1.1/vts/functional/Android.bp
index 202a824..8d6d749 100644
--- a/vibrator/1.4/vts/functional/Android.bp
+++ b/bluetooth/1.1/vts/functional/Android.bp
@@ -15,19 +15,13 @@
//
cc_test {
- name: "VtsHalVibratorV1_4TargetTest",
+ name: "VtsHalBluetoothV1_1TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
- srcs: ["VtsHalVibratorV1_4TargetTest.cpp"],
+ srcs: ["VtsHalBluetoothV1_1TargetTest.cpp"],
static_libs: [
- "android.hardware.vibrator@1.0",
- "android.hardware.vibrator@1.1",
- "android.hardware.vibrator@1.2",
- "android.hardware.vibrator@1.3",
- "android.hardware.vibrator@1.4",
+ "android.hardware.bluetooth@1.1",
+ "android.hardware.bluetooth@1.0",
+ "libbluetooth-types",
],
- test_suites: [
- "general-tests",
- "vts-core",
- ],
+ test_suites: ["general-tests", "vts-core"],
}
-
diff --git a/bluetooth/1.1/vts/functional/VtsHalBluetoothV1_1TargetTest.cpp b/bluetooth/1.1/vts/functional/VtsHalBluetoothV1_1TargetTest.cpp
new file mode 100644
index 0000000..659b2c8
--- /dev/null
+++ b/bluetooth/1.1/vts/functional/VtsHalBluetoothV1_1TargetTest.cpp
@@ -0,0 +1,760 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "bluetooth_hidl_hal_test"
+#include <android-base/logging.h>
+
+#include <android/hardware/bluetooth/1.0/types.h>
+#include <android/hardware/bluetooth/1.1/IBluetoothHci.h>
+#include <android/hardware/bluetooth/1.1/IBluetoothHciCallbacks.h>
+#include <hardware/bluetooth.h>
+#include <utils/Log.h>
+
+#include <VtsHalHidlTargetCallbackBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
+
+#include <chrono>
+#include <queue>
+#include <thread>
+
+using ::android::sp;
+using ::android::hardware::hidl_death_recipient;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::bluetooth::V1_0::Status;
+using ::android::hardware::bluetooth::V1_1::IBluetoothHci;
+using ::android::hardware::bluetooth::V1_1::IBluetoothHciCallbacks;
+
+#define HCI_MINIMUM_HCI_VERSION 5 // Bluetooth Core Specification 3.0 + HS
+#define HCI_MINIMUM_LMP_VERSION 5 // Bluetooth Core Specification 3.0 + HS
+#define NUM_HCI_COMMANDS_BANDWIDTH 1000
+#define NUM_SCO_PACKETS_BANDWIDTH 1000
+#define NUM_ACL_PACKETS_BANDWIDTH 1000
+#define WAIT_FOR_INIT_TIMEOUT std::chrono::milliseconds(2000)
+#define WAIT_FOR_HCI_EVENT_TIMEOUT std::chrono::milliseconds(2000)
+#define WAIT_FOR_SCO_DATA_TIMEOUT std::chrono::milliseconds(1000)
+#define WAIT_FOR_ACL_DATA_TIMEOUT std::chrono::milliseconds(1000)
+#define INTERFACE_CLOSE_DELAY_MS std::chrono::milliseconds(200)
+
+#define COMMAND_HCI_SHOULD_BE_UNKNOWN \
+ { 0xff, 0x3B, 0x08, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }
+#define COMMAND_HCI_READ_LOCAL_VERSION_INFORMATION \
+ { 0x01, 0x10, 0x00 }
+#define COMMAND_HCI_READ_BUFFER_SIZE \
+ { 0x05, 0x10, 0x00 }
+#define COMMAND_HCI_WRITE_LOOPBACK_MODE_LOCAL \
+ { 0x02, 0x18, 0x01, 0x01 }
+#define COMMAND_HCI_RESET \
+ { 0x03, 0x0c, 0x00 }
+#define COMMAND_HCI_WRITE_LOCAL_NAME \
+ { 0x13, 0x0c, 0xf8 }
+#define HCI_STATUS_SUCCESS 0x00
+#define HCI_STATUS_UNKNOWN_HCI_COMMAND 0x01
+
+#define EVENT_CONNECTION_COMPLETE 0x03
+#define EVENT_COMMAND_COMPLETE 0x0e
+#define EVENT_COMMAND_STATUS 0x0f
+#define EVENT_NUMBER_OF_COMPLETED_PACKETS 0x13
+#define EVENT_LOOPBACK_COMMAND 0x19
+
+#define EVENT_CODE_BYTE 0
+#define EVENT_LENGTH_BYTE 1
+#define EVENT_FIRST_PAYLOAD_BYTE 2
+#define EVENT_COMMAND_STATUS_STATUS_BYTE 2
+#define EVENT_COMMAND_STATUS_ALLOWED_PACKETS_BYTE 3
+#define EVENT_COMMAND_STATUS_OPCODE_LSBYTE 4 // Bytes 4 and 5
+#define EVENT_COMMAND_COMPLETE_ALLOWED_PACKETS_BYTE 2
+#define EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE 3 // Bytes 3 and 4
+#define EVENT_COMMAND_COMPLETE_STATUS_BYTE 5
+#define EVENT_COMMAND_COMPLETE_FIRST_PARAM_BYTE 6
+#define EVENT_LOCAL_HCI_VERSION_BYTE EVENT_COMMAND_COMPLETE_FIRST_PARAM_BYTE
+#define EVENT_LOCAL_LMP_VERSION_BYTE (EVENT_LOCAL_HCI_VERSION_BYTE + 3)
+
+#define EVENT_CONNECTION_COMPLETE_PARAM_LENGTH 11
+#define EVENT_CONNECTION_COMPLETE_TYPE 11
+#define EVENT_CONNECTION_COMPLETE_TYPE_SCO 0
+#define EVENT_CONNECTION_COMPLETE_TYPE_ACL 1
+#define EVENT_CONNECTION_COMPLETE_HANDLE_LSBYTE 3
+#define EVENT_COMMAND_STATUS_LENGTH 4
+
+#define EVENT_NUMBER_OF_COMPLETED_PACKETS_NUM_HANDLES 2
+
+#define ACL_BROADCAST_FLAG_OFFSET 6
+#define ACL_BROADCAST_FLAG_POINT_TO_POINT 0x0
+#define ACL_BROADCAST_POINT_TO_POINT \
+ (ACL_BROADCAST_FLAG_POINT_TO_POINT << ACL_BROADCAST_FLAG_OFFSET)
+
+#define ACL_PACKET_BOUNDARY_FLAG_OFFSET 4
+#define ACL_PACKET_BOUNDARY_FLAG_FIRST_AUTO_FLUSHABLE 0x2
+#define ACL_PACKET_BOUNDARY_FIRST_AUTO_FLUSHABLE \
+ (ACL_PACKET_BOUNDARY_FLAG_FIRST_AUTO_FLUSHABLE \
+ << ACL_PACKET_BOUNDARY_FLAG_OFFSET)
+
+// To be removed in VTS release builds
+#define ACL_HANDLE_QCA_DEBUG_MESSAGE 0xedc
+
+constexpr char kCallbackNameAclEventReceived[] = "aclDataReceived";
+constexpr char kCallbackNameHciEventReceived[] = "hciEventReceived";
+constexpr char kCallbackNameInitializationComplete[] = "initializationComplete";
+constexpr char kCallbackNameScoEventReceived[] = "scoDataReceived";
+constexpr char kCallbackNameIsoEventReceived[] = "isoDataReceived";
+
+class ThroughputLogger {
+ public:
+ ThroughputLogger(std::string task)
+ : task_(task), start_time_(std::chrono::steady_clock::now()) {}
+
+ ~ThroughputLogger() {
+ if (total_bytes_ == 0) return;
+ std::chrono::duration<double> duration =
+ std::chrono::steady_clock::now() - start_time_;
+ double s = duration.count();
+ if (s == 0) return;
+ double rate_kb = (static_cast<double>(total_bytes_) / s) / 1024;
+ ALOGD("%s %.1f KB/s (%zu bytes in %.3fs)", task_.c_str(), rate_kb,
+ total_bytes_, s);
+ }
+
+ void setTotalBytes(size_t total_bytes) { total_bytes_ = total_bytes; }
+
+ private:
+ size_t total_bytes_ = 0;
+ std::string task_;
+ std::chrono::steady_clock::time_point start_time_;
+};
+
+// The main test class for Bluetooth HIDL HAL.
+class BluetoothHidlTest : public ::testing::TestWithParam<std::string> {
+ public:
+ virtual void SetUp() override {
+ // currently test passthrough mode only
+ bluetooth = IBluetoothHci::getService(GetParam());
+ ASSERT_NE(bluetooth, nullptr);
+ ALOGI("%s: getService() for bluetooth is %s", __func__,
+ bluetooth->isRemote() ? "remote" : "local");
+
+ bluetooth_hci_death_recipient = new BluetoothHciDeathRecipient();
+ ASSERT_NE(bluetooth_hci_death_recipient, nullptr);
+ ASSERT_TRUE(
+ bluetooth->linkToDeath(bluetooth_hci_death_recipient, 0).isOk());
+
+ bluetooth_cb = new BluetoothHciCallbacks(*this);
+ ASSERT_NE(bluetooth_cb, nullptr);
+
+ max_acl_data_packet_length = 0;
+ max_sco_data_packet_length = 0;
+ max_acl_data_packets = 0;
+ max_sco_data_packets = 0;
+
+ initialized = false;
+ event_cb_count = 0;
+ acl_cb_count = 0;
+ sco_cb_count = 0;
+ iso_cb_count = 0;
+
+ ASSERT_FALSE(initialized);
+ // Should not be checked in production code
+ ASSERT_TRUE(bluetooth->initialize(bluetooth_cb).isOk());
+
+ bluetooth_cb->SetWaitTimeout(kCallbackNameInitializationComplete,
+ WAIT_FOR_INIT_TIMEOUT);
+ bluetooth_cb->SetWaitTimeout(kCallbackNameHciEventReceived,
+ WAIT_FOR_HCI_EVENT_TIMEOUT);
+ bluetooth_cb->SetWaitTimeout(kCallbackNameAclEventReceived,
+ WAIT_FOR_ACL_DATA_TIMEOUT);
+ bluetooth_cb->SetWaitTimeout(kCallbackNameScoEventReceived,
+ WAIT_FOR_SCO_DATA_TIMEOUT);
+
+ EXPECT_TRUE(
+ bluetooth_cb->WaitForCallback(kCallbackNameInitializationComplete)
+ .no_timeout);
+
+ ASSERT_TRUE(initialized);
+ }
+
+ virtual void TearDown() override {
+ ALOGI("TearDown");
+ // Should not be checked in production code
+ ASSERT_TRUE(bluetooth->close().isOk());
+ std::this_thread::sleep_for(INTERFACE_CLOSE_DELAY_MS);
+ handle_no_ops();
+ EXPECT_EQ(static_cast<size_t>(0), event_queue.size());
+ EXPECT_EQ(static_cast<size_t>(0), sco_queue.size());
+ EXPECT_EQ(static_cast<size_t>(0), acl_queue.size());
+ EXPECT_EQ(static_cast<size_t>(0), iso_queue.size());
+ }
+
+ void setBufferSizes();
+
+ // Functions called from within tests in loopback mode
+ void sendAndCheckHCI(int num_packets);
+ void sendAndCheckSCO(int num_packets, size_t size, uint16_t handle);
+ void sendAndCheckACL(int num_packets, size_t size, uint16_t handle);
+
+ // Helper functions to try to get a handle on verbosity
+ void enterLoopbackMode(std::vector<uint16_t>* sco_handles,
+ std::vector<uint16_t>* acl_handles);
+ void handle_no_ops();
+ void wait_for_event(bool timeout_is_error);
+ void wait_for_command_complete_event(hidl_vec<uint8_t> cmd);
+ int wait_for_completed_packets_event(uint16_t handle);
+
+ class BluetoothHciDeathRecipient : public hidl_death_recipient {
+ public:
+ void serviceDied(
+ uint64_t /*cookie*/,
+ const android::wp<::android::hidl::base::V1_0::IBase>& /*who*/)
+ override {
+ FAIL();
+ }
+ };
+
+ // A simple test implementation of BluetoothHciCallbacks.
+ class BluetoothHciCallbacks
+ : public ::testing::VtsHalHidlTargetCallbackBase<BluetoothHidlTest>,
+ public IBluetoothHciCallbacks {
+ BluetoothHidlTest& parent_;
+
+ public:
+ BluetoothHciCallbacks(BluetoothHidlTest& parent) : parent_(parent) {}
+
+ virtual ~BluetoothHciCallbacks() = default;
+
+ Return<void> initializationComplete(Status status) override {
+ parent_.initialized = (status == Status::SUCCESS);
+ NotifyFromCallback(kCallbackNameInitializationComplete);
+ ALOGV("%s (status = %d)", __func__, static_cast<int>(status));
+ return Void();
+ };
+
+ Return<void> hciEventReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& event) override {
+ parent_.event_cb_count++;
+ parent_.event_queue.push(event);
+ NotifyFromCallback(kCallbackNameHciEventReceived);
+ ALOGV("Event received (length = %d)", static_cast<int>(event.size()));
+ return Void();
+ };
+
+ Return<void> aclDataReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& data) override {
+ parent_.acl_cb_count++;
+ parent_.acl_queue.push(data);
+ NotifyFromCallback(kCallbackNameAclEventReceived);
+ return Void();
+ };
+
+ Return<void> scoDataReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& data) override {
+ parent_.sco_cb_count++;
+ parent_.sco_queue.push(data);
+ NotifyFromCallback(kCallbackNameScoEventReceived);
+ return Void();
+ };
+
+ Return<void> isoDataReceived(
+ const ::android::hardware::hidl_vec<uint8_t>& data) override {
+ parent_.iso_cb_count++;
+ parent_.iso_queue.push(data);
+ NotifyFromCallback(kCallbackNameIsoEventReceived);
+ return Void();
+ };
+ };
+
+ sp<IBluetoothHci> bluetooth;
+ sp<BluetoothHciCallbacks> bluetooth_cb;
+ sp<BluetoothHciDeathRecipient> bluetooth_hci_death_recipient;
+ std::queue<hidl_vec<uint8_t>> event_queue;
+ std::queue<hidl_vec<uint8_t>> acl_queue;
+ std::queue<hidl_vec<uint8_t>> sco_queue;
+ std::queue<hidl_vec<uint8_t>> iso_queue;
+
+ bool initialized;
+
+ int event_cb_count;
+ int sco_cb_count;
+ int acl_cb_count;
+ int iso_cb_count;
+
+ int max_acl_data_packet_length;
+ int max_sco_data_packet_length;
+ int max_acl_data_packets;
+ int max_sco_data_packets;
+};
+
+// Discard NO-OPs from the event queue.
+void BluetoothHidlTest::handle_no_ops() {
+ while (event_queue.size() > 0) {
+ hidl_vec<uint8_t> event = event_queue.front();
+ EXPECT_GE(event.size(),
+ static_cast<size_t>(EVENT_COMMAND_COMPLETE_STATUS_BYTE));
+ bool event_is_no_op =
+ (event[EVENT_CODE_BYTE] == EVENT_COMMAND_COMPLETE) &&
+ (event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE] == 0x00) &&
+ (event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE + 1] == 0x00);
+ event_is_no_op |= (event[EVENT_CODE_BYTE] == EVENT_COMMAND_STATUS) &&
+ (event[EVENT_COMMAND_STATUS_OPCODE_LSBYTE] == 0x00) &&
+ (event[EVENT_COMMAND_STATUS_OPCODE_LSBYTE + 1] == 0x00);
+ if (event_is_no_op) {
+ event_queue.pop();
+ } else {
+ break;
+ }
+ }
+ // To be removed in VTS release builds
+ while (acl_queue.size() > 0) {
+ hidl_vec<uint8_t> acl_packet = acl_queue.front();
+ uint16_t connection_handle = acl_packet[1] & 0xF;
+ connection_handle <<= 8;
+ connection_handle |= acl_packet[0];
+ bool packet_is_no_op = connection_handle == ACL_HANDLE_QCA_DEBUG_MESSAGE;
+ if (packet_is_no_op) {
+ acl_queue.pop();
+ } else {
+ break;
+ }
+ }
+}
+
+// Receive an event, discarding NO-OPs.
+void BluetoothHidlTest::wait_for_event(bool timeout_is_error = true) {
+ hidl_vec<uint8_t> event;
+ do {
+ bool no_timeout =
+ bluetooth_cb->WaitForCallback(kCallbackNameHciEventReceived).no_timeout;
+ EXPECT_TRUE(no_timeout || !timeout_is_error);
+ if (no_timeout && timeout_is_error) {
+ EXPECT_LT(static_cast<size_t>(0), event_queue.size());
+ }
+ if (event_queue.size() == 0) {
+ // WaitForCallback timed out.
+ return;
+ }
+ handle_no_ops();
+ } while (event_queue.size() == 0);
+}
+
+// Wait until a COMMAND_COMPLETE is received.
+void BluetoothHidlTest::wait_for_command_complete_event(hidl_vec<uint8_t> cmd) {
+ wait_for_event();
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+
+ EXPECT_GT(event.size(),
+ static_cast<size_t>(EVENT_COMMAND_COMPLETE_STATUS_BYTE));
+ EXPECT_EQ(EVENT_COMMAND_COMPLETE, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(cmd[0], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE]);
+ EXPECT_EQ(cmd[1], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE + 1]);
+ EXPECT_EQ(HCI_STATUS_SUCCESS, event[EVENT_COMMAND_COMPLETE_STATUS_BYTE]);
+}
+
+// Send the command to read the controller's buffer sizes.
+void BluetoothHidlTest::setBufferSizes() {
+ hidl_vec<uint8_t> cmd = COMMAND_HCI_READ_BUFFER_SIZE;
+ bluetooth->sendHciCommand(cmd);
+
+ wait_for_event();
+ if (event_queue.size() == 0) return;
+
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+
+ EXPECT_EQ(EVENT_COMMAND_COMPLETE, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(cmd[0], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE]);
+ EXPECT_EQ(cmd[1], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE + 1]);
+ EXPECT_EQ(HCI_STATUS_SUCCESS, event[EVENT_COMMAND_COMPLETE_STATUS_BYTE]);
+
+ max_acl_data_packet_length =
+ event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 1] +
+ (event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 2] << 8);
+ max_sco_data_packet_length = event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 3];
+ max_acl_data_packets = event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 4] +
+ (event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 5] << 8);
+ max_sco_data_packets = event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 6] +
+ (event[EVENT_COMMAND_COMPLETE_STATUS_BYTE + 7] << 8);
+
+ ALOGD("%s: ACL max %d num %d SCO max %d num %d", __func__,
+ static_cast<int>(max_acl_data_packet_length),
+ static_cast<int>(max_acl_data_packets),
+ static_cast<int>(max_sco_data_packet_length),
+ static_cast<int>(max_sco_data_packets));
+}
+
+// Send an HCI command (in Loopback mode) and check the response.
+void BluetoothHidlTest::sendAndCheckHCI(int num_packets) {
+ ThroughputLogger logger = {__func__};
+ int command_size = 0;
+ for (int n = 0; n < num_packets; n++) {
+ // Send an HCI packet
+ std::vector<uint8_t> write_name = COMMAND_HCI_WRITE_LOCAL_NAME;
+ // With a name
+ char new_name[] = "John Jacob Jingleheimer Schmidt ___________________0";
+ size_t new_name_length = strlen(new_name);
+ for (size_t i = 0; i < new_name_length; i++)
+ write_name.push_back(static_cast<uint8_t>(new_name[i]));
+ // And the packet number
+ size_t i = new_name_length - 1;
+ for (int digits = n; digits > 0; digits = digits / 10, i--)
+ write_name[i] = static_cast<uint8_t>('0' + digits % 10);
+ // And padding
+ for (size_t i = 0; i < 248 - new_name_length; i++)
+ write_name.push_back(static_cast<uint8_t>(0));
+
+ hidl_vec<uint8_t> cmd = write_name;
+ bluetooth->sendHciCommand(cmd);
+
+ // Check the loopback of the HCI packet
+ wait_for_event();
+ if (event_queue.size() == 0) return;
+
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+ size_t compare_length =
+ (cmd.size() > static_cast<size_t>(0xff) ? static_cast<size_t>(0xff)
+ : cmd.size());
+ EXPECT_GT(event.size(), compare_length + EVENT_FIRST_PAYLOAD_BYTE - 1);
+
+ EXPECT_EQ(EVENT_LOOPBACK_COMMAND, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(compare_length, event[EVENT_LENGTH_BYTE]);
+
+ // Don't compare past the end of the event.
+ if (compare_length + EVENT_FIRST_PAYLOAD_BYTE > event.size()) {
+ compare_length = event.size() - EVENT_FIRST_PAYLOAD_BYTE;
+ ALOGE("Only comparing %d bytes", static_cast<int>(compare_length));
+ }
+
+ if (n == num_packets - 1) {
+ command_size = cmd.size();
+ }
+
+ for (size_t i = 0; i < compare_length; i++)
+ EXPECT_EQ(cmd[i], event[EVENT_FIRST_PAYLOAD_BYTE + i]);
+ }
+ logger.setTotalBytes(command_size * num_packets * 2);
+}
+
+// Send a SCO data packet (in Loopback mode) and check the response.
+void BluetoothHidlTest::sendAndCheckSCO(int num_packets, size_t size,
+ uint16_t handle) {
+ ThroughputLogger logger = {__func__};
+ for (int n = 0; n < num_packets; n++) {
+ // Send a SCO packet
+ hidl_vec<uint8_t> sco_packet;
+ std::vector<uint8_t> sco_vector;
+ sco_vector.push_back(static_cast<uint8_t>(handle & 0xff));
+ sco_vector.push_back(static_cast<uint8_t>((handle & 0x0f00) >> 8));
+ sco_vector.push_back(static_cast<uint8_t>(size & 0xff));
+ sco_vector.push_back(static_cast<uint8_t>((size & 0xff00) >> 8));
+ for (size_t i = 0; i < size; i++) {
+ sco_vector.push_back(static_cast<uint8_t>(i + n));
+ }
+ sco_packet = sco_vector;
+ bluetooth->sendScoData(sco_vector);
+
+ // Check the loopback of the SCO packet
+ EXPECT_TRUE(bluetooth_cb->WaitForCallback(kCallbackNameScoEventReceived)
+ .no_timeout);
+ hidl_vec<uint8_t> sco_loopback = sco_queue.front();
+ sco_queue.pop();
+
+ EXPECT_EQ(sco_packet.size(), sco_loopback.size());
+ size_t successful_bytes = 0;
+
+ for (size_t i = 0; i < sco_packet.size(); i++) {
+ if (sco_packet[i] == sco_loopback[i]) {
+ successful_bytes = i;
+ } else {
+ ALOGE("Miscompare at %d (expected %x, got %x)", static_cast<int>(i),
+ sco_packet[i], sco_loopback[i]);
+ ALOGE("At %d (expected %x, got %x)", static_cast<int>(i + 1),
+ sco_packet[i + 1], sco_loopback[i + 1]);
+ break;
+ }
+ }
+ EXPECT_EQ(sco_packet.size(), successful_bytes + 1);
+ }
+ logger.setTotalBytes(num_packets * size * 2);
+}
+
+// Send an ACL data packet (in Loopback mode) and check the response.
+void BluetoothHidlTest::sendAndCheckACL(int num_packets, size_t size,
+ uint16_t handle) {
+ ThroughputLogger logger = {__func__};
+ for (int n = 0; n < num_packets; n++) {
+ // Send an ACL packet
+ hidl_vec<uint8_t> acl_packet;
+ std::vector<uint8_t> acl_vector;
+ acl_vector.push_back(static_cast<uint8_t>(handle & 0xff));
+ acl_vector.push_back(static_cast<uint8_t>((handle & 0x0f00) >> 8) |
+ ACL_BROADCAST_POINT_TO_POINT |
+ ACL_PACKET_BOUNDARY_FIRST_AUTO_FLUSHABLE);
+ acl_vector.push_back(static_cast<uint8_t>(size & 0xff));
+ acl_vector.push_back(static_cast<uint8_t>((size & 0xff00) >> 8));
+ for (size_t i = 0; i < size; i++) {
+ acl_vector.push_back(static_cast<uint8_t>(i + n));
+ }
+ acl_packet = acl_vector;
+ bluetooth->sendAclData(acl_vector);
+
+ // Check the loopback of the ACL packet
+ EXPECT_TRUE(bluetooth_cb->WaitForCallback(kCallbackNameAclEventReceived)
+ .no_timeout);
+ hidl_vec<uint8_t> acl_loopback = acl_queue.front();
+ acl_queue.pop();
+
+ EXPECT_EQ(acl_packet.size(), acl_loopback.size());
+ size_t successful_bytes = 0;
+
+ for (size_t i = 0; i < acl_packet.size(); i++) {
+ if (acl_packet[i] == acl_loopback[i]) {
+ successful_bytes = i;
+ } else {
+ ALOGE("Miscompare at %d (expected %x, got %x)", static_cast<int>(i),
+ acl_packet[i], acl_loopback[i]);
+ ALOGE("At %d (expected %x, got %x)", static_cast<int>(i + 1),
+ acl_packet[i + 1], acl_loopback[i + 1]);
+ break;
+ }
+ }
+ EXPECT_EQ(acl_packet.size(), successful_bytes + 1);
+ }
+ logger.setTotalBytes(num_packets * size * 2);
+}
+
+// Return the number of completed packets reported by the controller.
+int BluetoothHidlTest::wait_for_completed_packets_event(uint16_t handle) {
+ int packets_processed = 0;
+ wait_for_event(false);
+ if (event_queue.size() == 0) {
+ ALOGW("%s: WaitForCallback timed out.", __func__);
+ return packets_processed;
+ }
+ while (event_queue.size() > 0) {
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+
+ EXPECT_EQ(EVENT_NUMBER_OF_COMPLETED_PACKETS, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(1, event[EVENT_NUMBER_OF_COMPLETED_PACKETS_NUM_HANDLES]);
+
+ uint16_t event_handle = event[3] + (event[4] << 8);
+ EXPECT_EQ(handle, event_handle);
+
+ packets_processed += event[5] + (event[6] << 8);
+ }
+ return packets_processed;
+}
+
+// Send local loopback command and initialize SCO and ACL handles.
+void BluetoothHidlTest::enterLoopbackMode(std::vector<uint16_t>* sco_handles,
+ std::vector<uint16_t>* acl_handles) {
+ hidl_vec<uint8_t> cmd = COMMAND_HCI_WRITE_LOOPBACK_MODE_LOCAL;
+ bluetooth->sendHciCommand(cmd);
+
+ // Receive connection complete events with data channels
+ int connection_event_count = 0;
+ bool command_complete_received = false;
+ while (true) {
+ wait_for_event(false);
+ if (event_queue.size() == 0) {
+ // Fail if there was no event received or no connections completed.
+ EXPECT_TRUE(command_complete_received);
+ EXPECT_LT(0, connection_event_count);
+ return;
+ }
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+ EXPECT_GT(event.size(),
+ static_cast<size_t>(EVENT_COMMAND_COMPLETE_STATUS_BYTE));
+ if (event[EVENT_CODE_BYTE] == EVENT_CONNECTION_COMPLETE) {
+ EXPECT_GT(event.size(),
+ static_cast<size_t>(EVENT_CONNECTION_COMPLETE_TYPE));
+ EXPECT_EQ(event[EVENT_LENGTH_BYTE],
+ EVENT_CONNECTION_COMPLETE_PARAM_LENGTH);
+ uint8_t connection_type = event[EVENT_CONNECTION_COMPLETE_TYPE];
+
+ EXPECT_TRUE(connection_type == EVENT_CONNECTION_COMPLETE_TYPE_SCO ||
+ connection_type == EVENT_CONNECTION_COMPLETE_TYPE_ACL);
+
+ // Save handles
+ uint16_t handle = event[EVENT_CONNECTION_COMPLETE_HANDLE_LSBYTE] |
+ event[EVENT_CONNECTION_COMPLETE_HANDLE_LSBYTE + 1] << 8;
+ if (connection_type == EVENT_CONNECTION_COMPLETE_TYPE_SCO)
+ sco_handles->push_back(handle);
+ else
+ acl_handles->push_back(handle);
+
+ ALOGD("Connect complete type = %d handle = %d",
+ event[EVENT_CONNECTION_COMPLETE_TYPE], handle);
+ connection_event_count++;
+ } else {
+ EXPECT_EQ(EVENT_COMMAND_COMPLETE, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(cmd[0], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE]);
+ EXPECT_EQ(cmd[1], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE + 1]);
+ EXPECT_EQ(HCI_STATUS_SUCCESS, event[EVENT_COMMAND_COMPLETE_STATUS_BYTE]);
+ command_complete_received = true;
+ }
+ }
+}
+
+// Empty test: Initialize()/Close() are called in SetUp()/TearDown().
+TEST_P(BluetoothHidlTest, InitializeAndClose) {}
+
+// Send an HCI Reset with sendHciCommand and wait for a command complete event.
+TEST_P(BluetoothHidlTest, HciReset) {
+ hidl_vec<uint8_t> cmd = COMMAND_HCI_RESET;
+ bluetooth->sendHciCommand(cmd);
+
+ wait_for_command_complete_event(cmd);
+}
+
+// Read and check the HCI version of the controller.
+TEST_P(BluetoothHidlTest, HciVersionTest) {
+ hidl_vec<uint8_t> cmd = COMMAND_HCI_READ_LOCAL_VERSION_INFORMATION;
+ bluetooth->sendHciCommand(cmd);
+
+ wait_for_event();
+ if (event_queue.size() == 0) return;
+
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+ EXPECT_GT(event.size(), static_cast<size_t>(EVENT_LOCAL_LMP_VERSION_BYTE));
+
+ EXPECT_EQ(EVENT_COMMAND_COMPLETE, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(cmd[0], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE]);
+ EXPECT_EQ(cmd[1], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE + 1]);
+ EXPECT_EQ(HCI_STATUS_SUCCESS, event[EVENT_COMMAND_COMPLETE_STATUS_BYTE]);
+
+ EXPECT_LE(HCI_MINIMUM_HCI_VERSION, event[EVENT_LOCAL_HCI_VERSION_BYTE]);
+ EXPECT_LE(HCI_MINIMUM_LMP_VERSION, event[EVENT_LOCAL_LMP_VERSION_BYTE]);
+}
+
+// Send an unknown HCI command and wait for the error message.
+TEST_P(BluetoothHidlTest, HciUnknownCommand) {
+ hidl_vec<uint8_t> cmd = COMMAND_HCI_SHOULD_BE_UNKNOWN;
+ bluetooth->sendHciCommand(cmd);
+
+ wait_for_event();
+ if (event_queue.size() == 0) return;
+
+ hidl_vec<uint8_t> event = event_queue.front();
+ event_queue.pop();
+
+ EXPECT_GT(event.size(),
+ static_cast<size_t>(EVENT_COMMAND_COMPLETE_STATUS_BYTE));
+ if (event[EVENT_CODE_BYTE] == EVENT_COMMAND_COMPLETE) {
+ EXPECT_EQ(cmd[0], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE]);
+ EXPECT_EQ(cmd[1], event[EVENT_COMMAND_COMPLETE_OPCODE_LSBYTE + 1]);
+ EXPECT_EQ(HCI_STATUS_UNKNOWN_HCI_COMMAND,
+ event[EVENT_COMMAND_COMPLETE_STATUS_BYTE]);
+ } else {
+ EXPECT_EQ(EVENT_COMMAND_STATUS, event[EVENT_CODE_BYTE]);
+ EXPECT_EQ(cmd[0], event[EVENT_COMMAND_STATUS_OPCODE_LSBYTE]);
+ EXPECT_EQ(cmd[1], event[EVENT_COMMAND_STATUS_OPCODE_LSBYTE + 1]);
+ EXPECT_EQ(HCI_STATUS_UNKNOWN_HCI_COMMAND,
+ event[EVENT_COMMAND_STATUS_STATUS_BYTE]);
+ }
+}
+
+// Enter loopback mode, but don't send any packets.
+TEST_P(BluetoothHidlTest, WriteLoopbackMode) {
+ std::vector<uint16_t> sco_connection_handles;
+ std::vector<uint16_t> acl_connection_handles;
+ enterLoopbackMode(&sco_connection_handles, &acl_connection_handles);
+}
+
+// Enter loopback mode and send single packets.
+TEST_P(BluetoothHidlTest, LoopbackModeSinglePackets) {
+ setBufferSizes();
+
+ std::vector<uint16_t> sco_connection_handles;
+ std::vector<uint16_t> acl_connection_handles;
+ enterLoopbackMode(&sco_connection_handles, &acl_connection_handles);
+
+ sendAndCheckHCI(1);
+
+ // This should work, but breaks on some current platforms. Figure out how to
+ // grandfather older devices but test new ones.
+ if (0 && sco_connection_handles.size() > 0) {
+ EXPECT_LT(0, max_sco_data_packet_length);
+ sendAndCheckSCO(1, max_sco_data_packet_length, sco_connection_handles[0]);
+ int sco_packets_sent = 1;
+ int completed_packets =
+ wait_for_completed_packets_event(sco_connection_handles[0]);
+ if (sco_packets_sent != completed_packets) {
+ ALOGW("%s: packets_sent (%d) != completed_packets (%d)", __func__,
+ sco_packets_sent, completed_packets);
+ }
+ }
+
+ if (acl_connection_handles.size() > 0) {
+ EXPECT_LT(0, max_acl_data_packet_length);
+ sendAndCheckACL(1, max_acl_data_packet_length, acl_connection_handles[0]);
+ int acl_packets_sent = 1;
+ int completed_packets =
+ wait_for_completed_packets_event(acl_connection_handles[0]);
+ if (acl_packets_sent != completed_packets) {
+ ALOGW("%s: packets_sent (%d) != completed_packets (%d)", __func__,
+ acl_packets_sent, completed_packets);
+ }
+ }
+}
+
+// Enter loopback mode and send packets for bandwidth measurements.
+TEST_P(BluetoothHidlTest, LoopbackModeBandwidth) {
+ setBufferSizes();
+
+ std::vector<uint16_t> sco_connection_handles;
+ std::vector<uint16_t> acl_connection_handles;
+ enterLoopbackMode(&sco_connection_handles, &acl_connection_handles);
+
+ sendAndCheckHCI(NUM_HCI_COMMANDS_BANDWIDTH);
+
+ // This should work, but breaks on some current platforms. Figure out how to
+ // grandfather older devices but test new ones.
+ if (0 && sco_connection_handles.size() > 0) {
+ EXPECT_LT(0, max_sco_data_packet_length);
+ sendAndCheckSCO(NUM_SCO_PACKETS_BANDWIDTH, max_sco_data_packet_length,
+ sco_connection_handles[0]);
+ int sco_packets_sent = NUM_SCO_PACKETS_BANDWIDTH;
+ int completed_packets =
+ wait_for_completed_packets_event(sco_connection_handles[0]);
+ if (sco_packets_sent != completed_packets) {
+ ALOGW("%s: packets_sent (%d) != completed_packets (%d)", __func__,
+ sco_packets_sent, completed_packets);
+ }
+ }
+
+ if (acl_connection_handles.size() > 0) {
+ EXPECT_LT(0, max_acl_data_packet_length);
+ sendAndCheckACL(NUM_ACL_PACKETS_BANDWIDTH, max_acl_data_packet_length,
+ acl_connection_handles[0]);
+ int acl_packets_sent = NUM_ACL_PACKETS_BANDWIDTH;
+ int completed_packets =
+ wait_for_completed_packets_event(acl_connection_handles[0]);
+ if (acl_packets_sent != completed_packets) {
+ ALOGW("%s: packets_sent (%d) != completed_packets (%d)", __func__,
+ acl_packets_sent, completed_packets);
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, BluetoothHidlTest,
+ testing::ValuesIn(
+ android::hardware::getAllHalInstanceNames(IBluetoothHci::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index aeb11c4..f0b5966 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -75,7 +75,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.bluetooth</name>
- <version>1.0</version>
+ <version>1.0-1</version>
<interface>
<name>IBluetoothHci</name>
<instance>default</instance>
@@ -256,7 +256,7 @@
<hal format="hidl" optional="false">
<name>android.hardware.keymaster</name>
<version>3.0</version>
- <version>4.0</version>
+ <version>4.0-1</version>
<interface>
<name>IKeymasterDevice</name>
<instance>default</instance>
@@ -264,7 +264,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.keymaster</name>
- <version>4.0</version>
+ <version>4.0-1</version>
<interface>
<name>IKeymasterDevice</name>
<instance>strongbox</instance>
diff --git a/current.txt b/current.txt
index 98cb792..c1991c3 100644
--- a/current.txt
+++ b/current.txt
@@ -584,15 +584,20 @@
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
# HALs released in Android R
+79e115c8f8970b8b914bafc66df5425e065fda4dcda97222966ef12451d2a1cc android.hardware.bluetooth@1.1::IBluetoothHci
+40ab2c6866c18d32baf6e49e3053949e79601f56963a791e93e68b9ee18f718d android.hardware.bluetooth@1.1::IBluetoothHciCallbacks
07d0a252b2d8fa35887908a996ba395cf392968395fc30afab791f46e0c22a52 android.hardware.boot@1.1::IBootControl
74049a402be913963edfdd80828a53736570e9d8124a1bf18166b6ed46a6b0ab android.hardware.boot@1.1::types
ce8dbe76eb9ee94b46ef98f725be992e760a5751073d4f4912484026541371f3 android.hardware.health@2.1::IHealth
26f04510a0b57aba5167c5c0a7c2f077c2acbb98b81902a072517829fd9fd67f android.hardware.health@2.1::IHealthInfoCallback
db47f4ceceb1f06c656f39caa70c557b0f8471ef59fd58611bea667ffca20101 android.hardware.health@2.1::types
+c228aaa27f66c48e147159a4f4996c5273191fece1b08de31bd171c61334855e android.hardware.keymaster@4.1::IKeymasterDevice
+adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
+7a04ea5595ed418ca3e91c28b8bd7353dd988be9be7b0c8c9e64fb4b77bd4523 android.hardware.keymaster@4.1::types
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
+c511b1427b1c3f76af90967bbddaaf250db983a8d3abb9ff189fb5a807cf3d4d android.hardware.neuralnetworks@1.3::types
274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication
diff --git a/keymaster/4.1/Android.bp b/keymaster/4.1/Android.bp
new file mode 100644
index 0000000..eaa7e41
--- /dev/null
+++ b/keymaster/4.1/Android.bp
@@ -0,0 +1,19 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.keymaster@4.1",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IKeymasterDevice.hal",
+ "IOperation.hal",
+ ],
+ interfaces: [
+ "android.hardware.keymaster@4.0",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: false,
+}
diff --git a/keymaster/4.1/IKeymasterDevice.hal b/keymaster/4.1/IKeymasterDevice.hal
new file mode 100644
index 0000000..64d2c9f
--- /dev/null
+++ b/keymaster/4.1/IKeymasterDevice.hal
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.keymaster@4.1;
+
+import @4.0::ErrorCode;
+import @4.0::HardwareAuthToken;
+import @4.0::IKeymasterDevice;
+import @4.0::KeyParameter;
+import @4.0::KeyPurpose;
+import @4.0::OperationHandle;
+import IOperation;
+
+/**
+ * @4.1::IKeymasterDevice is a minor extension to @4.0::IKeymasterDevice. It adds support for
+ *
+ * - Partial hardware enforcement of UNLOCKED_DEVICE_REQUIRED keys;
+ * - Device-unique attestation;
+ * - Early boot only keys;
+ * - Better cleanup of operations when clients die without completing or aborting them.
+ */
+interface IKeymasterDevice extends @4.0::IKeymasterDevice {
+ /**
+ * Called by client to notify the IKeymasterDevice that the device is now locked, and keys with
+ * the UNLOCKED_DEVICE_REQUIRED tag should no longer be usable. When this function is called,
+ * the IKeymasterDevice should note the current timestamp, and attempts to use
+ * UNLOCKED_DEVICE_REQUIRED keys must be rejected with Error::DEVICE_LOCKED until an
+ * authentication token with a later timestamp is presented. If the `passwordOnly' argument is
+ * set to true, the sufficiently recent authentication token must indicate that the user
+ * authenticated with a password, not a biometric.
+ *
+ * @param passwordOnly specifies whether the device must be unlocked with a password, rather
+ * than a biometric, before UNLOCKED_DEVICE_REQUIRED keys can be used.
+ */
+ deviceLocked(bool passwordOnly) generates (ErrorCode error);
+
+ /**
+ * Called by client to notify the IKeymasterDevice that the device has left the early boot
+ * state, and that keys with the EARLY_BOOT_ONLY tag may no longer be used. All attempts to use
+ * an EARLY_BOOT_ONLY key after this method is called must fail with Error::INVALID_KEY_BLOB.
+ */
+ earlyBootEnded() generates (ErrorCode error);
+
+ /**
+ * Begins a cryptographic operation. beginOp() is a variation on begin() with identical
+ * functionality, but instead of an OperationHandle it returns an IOperation
+ * object. An IKeymasterDevice HAL service must call linkToDeath() on the IOperation before
+ * returning it, and the provided hidl_death_recipient, if called, must abort() the operation.
+ * This is to ensure that in the event a client crashes while an operation is in progress, the
+ * operation slot is freed and available for use by other clients.
+ *
+ * @4.1::IKeymasterDevices must implement both beginOp() and begin().
+ */
+ beginOp(KeyPurpose purpose, vec<uint8_t> keyBlob, vec<KeyParameter> inParams,
+ HardwareAuthToken authToken)
+ generates (ErrorCode error, vec<KeyParameter> outParams, IOperation operation);
+};
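For a sense of how these methods are called, here is a hedged client-side sketch assuming the usual HIDL proxy conventions (a single-value `generates` maps to a direct return, a multi-value `generates` takes a results lambda). KeyPurpose, ErrorCode, and OperationHandle come from @4.0 as imported above; the empty inParams/authToken are placeholders:

    #include <android/hardware/keymaster/4.1/IKeymasterDevice.h>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    namespace V4_0 = ::android::hardware::keymaster::V4_0;
    namespace V4_1 = ::android::hardware::keymaster::V4_1;

    // Lock-state bookkeeping: invoked by the platform, not by app code.
    void onDeviceLocked(const sp<V4_1::IKeymasterDevice>& km) {
      km->deviceLocked(true /* passwordOnly */);
    }

    // Starting an operation through the new slot-cleanup path.
    void startSigningOp(const sp<V4_1::IKeymasterDevice>& km,
                        const hidl_vec<uint8_t>& keyBlob) {
      km->beginOp(V4_0::KeyPurpose::SIGN, keyBlob, {} /* inParams */,
                  {} /* authToken */,
                  [](V4_0::ErrorCode error,
                     const hidl_vec<V4_0::KeyParameter>& /* outParams */,
                     const sp<V4_1::IOperation>& op) {
                    if (error != V4_0::ErrorCode::OK) return;
                    // The challenge feeds the auth-token flow, as with begin().
                    op->getOperationChallenge(
                        [](V4_0::ErrorCode, V4_0::OperationHandle challenge) {
                          (void)challenge;  // placeholder use
                        });
                  });
    }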
diff --git a/keymaster/4.1/IOperation.hal b/keymaster/4.1/IOperation.hal
new file mode 100644
index 0000000..7103e9e
--- /dev/null
+++ b/keymaster/4.1/IOperation.hal
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.keymaster@4.1;
+
+import @4.0::ErrorCode;
+import @4.0::OperationHandle;
+
+/**
+ * IOperation represents an in-progress IKeymasterDevice operation. It is returned by
+ * IKeymasterDevice.beginOp().
+ */
+interface IOperation {
+ /**
+ * Returns the operation handle to be used as an authentication challenge.
+ */
+ getOperationChallenge() generates (ErrorCode error, OperationHandle operation);
+};
diff --git a/keymaster/4.1/default/Android.bp b/keymaster/4.1/default/Android.bp
new file mode 100644
index 0000000..b06878b
--- /dev/null
+++ b/keymaster/4.1/default/Android.bp
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_binary {
+ name: "android.hardware.keymaster@4.1-service",
+ defaults: ["hidl_defaults"],
+ relative_install_path: "hw",
+ vendor: true,
+ init_rc: ["android.hardware.keymaster@4.1-service.rc"],
+ srcs: ["service.cpp"],
+
+ shared_libs: [
+ "android.hardware.keymaster@4.0",
+ "android.hardware.keymaster@4.1",
+ "libbase",
+ "libcutils",
+ "libhardware",
+ "libhidlbase",
+ "libkeymaster4",
+ "libkeymaster41",
+ "liblog",
+ "libutils",
+ ],
+
+}
diff --git a/keymaster/4.1/default/OWNERS b/keymaster/4.1/default/OWNERS
new file mode 100644
index 0000000..335660d
--- /dev/null
+++ b/keymaster/4.1/default/OWNERS
@@ -0,0 +1,2 @@
+jdanis@google.com
+swillden@google.com
diff --git a/keymaster/4.1/default/android.hardware.keymaster@4.1-service.rc b/keymaster/4.1/default/android.hardware.keymaster@4.1-service.rc
new file mode 100644
index 0000000..740b3c2
--- /dev/null
+++ b/keymaster/4.1/default/android.hardware.keymaster@4.1-service.rc
@@ -0,0 +1,6 @@
+service vendor.keymaster-4-1 /vendor/bin/hw/android.hardware.keymaster@4.1-service
+ interface android.hardware.keymaster@4.0::IKeymasterDevice default
+ interface android.hardware.keymaster@4.1::IKeymasterDevice default
+ class early_hal
+ user system
+ group system drmrpc
diff --git a/keymaster/4.1/default/service.cpp b/keymaster/4.1/default/service.cpp
new file mode 100644
index 0000000..d79a291
--- /dev/null
+++ b/keymaster/4.1/default/service.cpp
@@ -0,0 +1,35 @@
+/*
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <android-base/logging.h>
+#include <android/hardware/keymaster/4.1/IKeymasterDevice.h>
+#include <hidl/HidlTransportSupport.h>
+
+#include <AndroidKeymaster41Device.h>
+
+using android::hardware::keymaster::V4_0::SecurityLevel;
+
+int main() {
+ ::android::hardware::configureRpcThreadpool(1, true /* willJoinThreadpool */);
+ auto keymaster = ::keymaster::V4_1::CreateKeymasterDevice(SecurityLevel::SOFTWARE);
+ auto status = keymaster->registerAsService();
+ if (status != android::OK) {
+ LOG(FATAL) << "Could not register service for Keymaster 4.1 (" << status << ")";
+ }
+
+ android::hardware::joinRpcThreadpool();
+ return -1; // Should never get here.
+}
diff --git a/keymaster/4.1/support/Android.bp b/keymaster/4.1/support/Android.bp
new file mode 100644
index 0000000..34b6108
--- /dev/null
+++ b/keymaster/4.1/support/Android.bp
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library {
+ name: "libkeymaster4_1support",
+ vendor_available: true,
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+ export_include_dirs: ["include"],
+ shared_libs: [
+ "android.hardware.keymaster@3.0",
+ "android.hardware.keymaster@4.0",
+ "android.hardware.keymaster@4.1",
+ "libkeymaster4support",
+ ]
+}
diff --git a/keymaster/4.1/support/include/keymasterV4_1/authorization_set.h b/keymaster/4.1/support/include/keymasterV4_1/authorization_set.h
new file mode 100644
index 0000000..afc0eaf
--- /dev/null
+++ b/keymaster/4.1/support/include/keymasterV4_1/authorization_set.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_AUTHORIZATION_SET_H_
+#define HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_AUTHORIZATION_SET_H_
+
+#include <keymasterV4_0/authorization_set.h>
+
+#include <keymasterV4_1/keymaster_tags.h>
+
+namespace android::hardware::keymaster::V4_1 {
+
+using V4_0::AuthorizationSet;
+using V4_0::AuthorizationSetBuilder;
+using V4_0::KeyParameter;
+
+} // namespace android::hardware::keymaster::V4_1
+
+#endif // HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_AUTHORIZATION_SET_H_
diff --git a/keymaster/4.1/support/include/keymasterV4_1/keymaster_tags.h b/keymaster/4.1/support/include/keymasterV4_1/keymaster_tags.h
new file mode 100644
index 0000000..6ffe8e1
--- /dev/null
+++ b/keymaster/4.1/support/include/keymasterV4_1/keymaster_tags.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_KEYMASTER_TAGS_H_
+#define HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_KEYMASTER_TAGS_H_
+
+#include <android/hardware/keymaster/4.1/types.h>
+
+#include <keymasterV4_0/keymaster_tags.h>
+
+namespace android::hardware::keymaster::V4_1 {
+
+using V4_0::BlockMode;
+using V4_0::Digest;
+using V4_0::EcCurve;
+using V4_0::ErrorCode;
+using V4_0::HardwareAuthToken;
+using V4_0::KeyParameter;
+using V4_0::PaddingMode;
+using V4_0::TagType;
+using V4_0::VerificationToken;
+
+using V4_0::TypedTag;
+
+using V4_0::TAG_ACTIVE_DATETIME;
+using V4_0::TAG_ALGORITHM;
+using V4_0::TAG_ALLOW_WHILE_ON_BODY;
+using V4_0::TAG_APPLICATION_DATA;
+using V4_0::TAG_APPLICATION_ID;
+using V4_0::TAG_ASSOCIATED_DATA;
+using V4_0::TAG_ATTESTATION_APPLICATION_ID;
+using V4_0::TAG_ATTESTATION_CHALLENGE;
+using V4_0::TAG_AUTH_TIMEOUT;
+using V4_0::TAG_BLOB_USAGE_REQUIREMENTS;
+using V4_0::TAG_BLOCK_MODE;
+using V4_0::TAG_BOOT_PATCHLEVEL;
+using V4_0::TAG_BOOTLOADER_ONLY;
+using V4_0::TAG_CALLER_NONCE;
+using V4_0::TAG_CONFIRMATION_TOKEN;
+using V4_0::TAG_CREATION_DATETIME;
+using V4_0::TAG_DIGEST;
+using V4_0::TAG_EC_CURVE;
+using V4_0::TAG_HARDWARE_TYPE;
+using V4_0::TAG_INCLUDE_UNIQUE_ID;
+using V4_0::TAG_INVALID;
+using V4_0::TAG_KEY_SIZE;
+using V4_0::TAG_MAC_LENGTH;
+using V4_0::TAG_MAX_USES_PER_BOOT;
+using V4_0::TAG_MIN_MAC_LENGTH;
+using V4_0::TAG_MIN_SECONDS_BETWEEN_OPS;
+using V4_0::TAG_NO_AUTH_REQUIRED;
+using V4_0::TAG_NONCE;
+using V4_0::TAG_ORIGIN;
+using V4_0::TAG_ORIGINATION_EXPIRE_DATETIME;
+using V4_0::TAG_OS_PATCHLEVEL;
+using V4_0::TAG_OS_VERSION;
+using V4_0::TAG_PADDING;
+using V4_0::TAG_PURPOSE;
+using V4_0::TAG_RESET_SINCE_ID_ROTATION;
+using V4_0::TAG_ROLLBACK_RESISTANCE;
+using V4_0::TAG_ROOT_OF_TRUST;
+using V4_0::TAG_RSA_PUBLIC_EXPONENT;
+using V4_0::TAG_TRUSTED_CONFIRMATION_REQUIRED;
+using V4_0::TAG_TRUSTED_USER_PRESENCE_REQUIRED;
+using V4_0::TAG_UNIQUE_ID;
+using V4_0::TAG_UNLOCKED_DEVICE_REQUIRED;
+using V4_0::TAG_USAGE_EXPIRE_DATETIME;
+using V4_0::TAG_USER_AUTH_TYPE;
+using V4_0::TAG_USER_ID;
+using V4_0::TAG_USER_SECURE_ID;
+using V4_0::TAG_VENDOR_PATCHLEVEL;
+
+#define DECLARE_KM_4_1_TYPED_TAG(name) \
+ typedef typename V4_0::Tag2TypedTag<(static_cast<V4_0::Tag>(V4_1::Tag::name))>::type \
+ TAG_##name##_t; \
+ static TAG_##name##_t TAG_##name;
+
+DECLARE_KM_4_1_TYPED_TAG(EARLY_BOOT_ONLY);
+DECLARE_KM_4_1_TYPED_TAG(DEVICE_UNIQUE_ATTESTATION);
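+
+// For illustration, DECLARE_KM_4_1_TYPED_TAG(EARLY_BOOT_ONLY) expands to
+// roughly:
+//
+//     typedef typename V4_0::Tag2TypedTag<
+//             (static_cast<V4_0::Tag>(V4_1::Tag::EARLY_BOOT_ONLY))>::type
+//         TAG_EARLY_BOOT_ONLY_t;
+//     static TAG_EARLY_BOOT_ONLY_t TAG_EARLY_BOOT_ONLY;
+//
+// making the 4.1 tag usable with the V4_0 AuthorizationSet helpers, e.g.
+// AuthorizationSetBuilder().Authorization(TAG_EARLY_BOOT_ONLY).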
+
+} // namespace android::hardware::keymaster::V4_1
+
+#endif // HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_KEYMASTER_TAGS_H_
diff --git a/keymaster/4.1/types.hal b/keymaster/4.1/types.hal
new file mode 100644
index 0000000..bdf1731
--- /dev/null
+++ b/keymaster/4.1/types.hal
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.keymaster@4.1;
+
+import @4.0::ErrorCode;
+import @4.0::Tag;
+import @4.0::TagType;
+
+enum Tag : @4.0::Tag {
+ /**
+ * Keys tagged with EARLY_BOOT_ONLY may only be created or used during early
+ * boot, that is, until IKeymasterDevice::earlyBootEnded() is called.
+ */
+ EARLY_BOOT_ONLY = TagType:BOOL | 305,
+ /**
+ * DEVICE_UNIQUE_ATTESTATION is an argument to IKeymasterDevice::attestKey(). It indicates that
+ * attestation using a device-unique key is requested, rather than a batch key. Only
+ * SecurityLevel::STRONGBOX IKeymasterDevices may support device-unique attestations.
+ * SecurityLevel::TRUSTED_ENVIRONMENT IKeymasterDevices must return ErrorCode::INVALID_ARGUMENT
+ * if they receive DEVICE_UNIQUE_ATTESTATION. SecurityLevel::STRONGBOX IKeymasterDevices need
+ * not support DEVICE_UNIQUE_ATTESTATION, and must return ErrorCode::CANNOT_ATTEST_IDS if they
+ * do not support it.
+ *
+ * IKeymasterDevice implementations that support device-unique attestation MUST add the
+ * DEVICE_UNIQUE_ATTESTATION tag to device-unique attestations.
+ */
+ DEVICE_UNIQUE_ATTESTATION = TagType:BOOL | 720,
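+
+ /*
+ * For reference, assuming @4.0::TagType.BOOL == (7 << 28), these evaluate
+ * to EARLY_BOOT_ONLY == 0x70000131 and DEVICE_UNIQUE_ATTESTATION ==
+ * 0x700002d0.
+ */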
+};
diff --git a/keymaster/4.1/vts/OWNERS b/keymaster/4.1/vts/OWNERS
new file mode 100644
index 0000000..335660d
--- /dev/null
+++ b/keymaster/4.1/vts/OWNERS
@@ -0,0 +1,2 @@
+jdanis@google.com
+swillden@google.com
diff --git a/vibrator/1.4/vts/functional/Android.bp b/keymaster/4.1/vts/functional/Android.bp
similarity index 66%
rename from vibrator/1.4/vts/functional/Android.bp
rename to keymaster/4.1/vts/functional/Android.bp
index 202a824..f5a0c9c 100644
--- a/vibrator/1.4/vts/functional/Android.bp
+++ b/keymaster/4.1/vts/functional/Android.bp
@@ -15,19 +15,16 @@
//
cc_test {
- name: "VtsHalVibratorV1_4TargetTest",
+ name: "VtsHalKeymasterV4_1TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
- srcs: ["VtsHalVibratorV1_4TargetTest.cpp"],
+ srcs: [
+ "EarlyBootKeyTest.cpp",
+ ],
static_libs: [
- "android.hardware.vibrator@1.0",
- "android.hardware.vibrator@1.1",
- "android.hardware.vibrator@1.2",
- "android.hardware.vibrator@1.3",
- "android.hardware.vibrator@1.4",
+ "android.hardware.keymaster@4.0",
+ "android.hardware.keymaster@4.1",
+ "libkeymaster4support",
+ "libkeymaster4_1support",
],
- test_suites: [
- "general-tests",
- "vts-core",
- ],
+ test_suites: ["vts-core"],
}
-
diff --git a/vibrator/1.4/IVibratorCallback.hal b/keymaster/4.1/vts/functional/EarlyBootKeyTest.cpp
similarity index 80%
rename from vibrator/1.4/IVibratorCallback.hal
rename to keymaster/4.1/vts/functional/EarlyBootKeyTest.cpp
index 76281bc..4a19010 100644
--- a/vibrator/1.4/IVibratorCallback.hal
+++ b/keymaster/4.1/vts/functional/EarlyBootKeyTest.cpp
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-package android.hardware.vibrator@1.4;
+namespace android::hardware::keymaster::V4_1::test {
-interface IVibratorCallback {
- oneway onComplete();
-};
+// TODO(swillden): Put tests here.
+
+} // namespace android::hardware::keymaster::V4_1::test
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 86ab287..7df14b1 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -73,6 +73,4500 @@
BASE_MAX = 0xFFFF,
};
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+ /**
+ * Adds two tensors, element-wise.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the sum of both input tensors, optionally
+ * modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its
+ * way forward.
+ *
+ * Example:
+ *
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The sum, a tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ ADD = @1.2::OperationType:ADD,
+
+ /**
+ * Performs a 2-D average pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * sum_{di, dj}(
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * ) / sum(1)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ AVERAGE_POOL_2D = @1.2::OperationType:AVERAGE_POOL_2D,
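+
+ /*
+ * Worked example (assuming the usual pooling arithmetic): with explicit
+ * zero padding, a 2x2 filter and stride 2, an input of shape
+ * [1, 32, 32, 8] pools to out_height = out_width = (32 - 2) / 2 + 1 = 16,
+ * i.e. an output of shape [1, 16, 16, 8].
+ */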
+
+ /**
+ * Concatenates the input tensors along the given dimension.
+ *
+ * The input tensors must have identical {@link OperandType} and the same
+ * dimensions except the dimension along the concatenation axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the input section)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0 ~ n-1: The list of n input tensors, of shape
+ * [D0, D1, ..., Daxis(i), ..., Dm].
+ * Before HAL version 1.2, all input tensors of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * must have the same scale and zeroPoint as the output tensor.
+ * Since HAL version 1.2, zero-sized tensors are supported.
+ * * n: An {@link OperandType::INT32} scalar, specifying the
+ * concatenation axis.
+ *
+ * Outputs:
+ * * 0: The output, a tensor of the same {@link OperandType} as the input
+ * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint values can be different from
+ * input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+ */
+ CONCATENATION = @1.2::OperationType:CONCATENATION,
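+
+ /*
+ * Worked example: concatenating two tensors of shapes [2, 3] and [2, 5]
+ * along axis 1 yields an output of shape [2, 8].
+ */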
+
+ /**
+ * Performs a 2-D convolution operation.
+ *
+ * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
+ * batch of images, applying the filter to each window of each image of the
+ * appropriate size.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * sum_{di, dj, k} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[channel, di, dj, k]
+ * ) + bias[channel]
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Available since HAL version 1.2:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 12 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 11 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 9 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 8 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied: output_scale > input_scale * filter_scale
+ */
+ CONV_2D = @1.2::OperationType:CONV_2D,
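+
+ /*
+ * Worked example of the quantized bias constraint above: with
+ * input_scale = 0.5 and filter_scale = 0.25, the TENSOR_INT32 bias must
+ * use bias_scale = 0.5 * 0.25 = 0.125 (with zeroPoint 0).
+ */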
+
+ /**
+ * Performs a depthwise 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [1, filter_height, filter_width, depth_out]
+ * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
+ * applies a different filter to each input channel (expanding from 1
+ * channel to channel_multiplier channels for each), then concatenates the
+ * results together.
+ *
+ * The output has depth_out = depth_in * depth_multiplier channels.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, k * channel_multiplier + q] =
+ * sum_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[1, di, dj, k * channel_multiplier + q]
+ * ) + bias[k * channel_multiplier + q]
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Available since HAL version 1.2:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 3.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
+ * multiplier.
+ * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 13 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 12 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
+ * multiplier.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 10 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 9 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
+ */
+ DEPTHWISE_CONV_2D = @1.2::OperationType:DEPTHWISE_CONV_2D,
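+
+ /*
+ * Worked example: with depth_in = 8 and a depthwise multiplier of 3, the
+ * filter has shape [1, filter_height, filter_width, 24] and the output
+ * has depth_out = 8 * 3 = 24 channels.
+ */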
+
+ /**
+ * Rearranges data from depth into blocks of spatial data.
+ *
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the depth dimension are moved in spatial blocks to the height
+ * and width dimensions. The value block_size indicates the input block size
+ * and how the data is moved.
+ *
+ * Chunks of data of size block_size * block_size from depth are rearranged
+ * into non-overlapping blocks of size block_size x block_size.
+ *
+ * The width of the output tensor is input_depth * block_size, whereas the
+ * height is input_height * block_size. The depth of the input tensor must
+ * be divisible by block_size * block_size.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size * block_size must be a divisor
+ * of the input depth.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batch, height*block_size,
+ * width*block_size, depth/(block_size*block_size)].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ DEPTH_TO_SPACE = @1.2::OperationType:DEPTH_TO_SPACE,
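+
+ /*
+ * Worked example: an input of shape [1, 2, 2, 4] with block_size = 2
+ * produces an output of shape [1, 4, 4, 1].
+ */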
+
+ /**
+ * Dequantizes the input tensor.
+ *
+ * The formula is:
+ *
+ * output = (input - zeroPoint) * scale.
+ *
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
+ *
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}.
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ */
+ DEQUANTIZE = @1.2::OperationType:DEQUANTIZE,
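+
+ /*
+ * Worked example: for a TENSOR_QUANT8_ASYMM input with scale = 0.5 and
+ * zeroPoint = 128, the quantized value 130 dequantizes to
+ * (130 - 128) * 0.5 = 1.0.
+ */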
+
+ /**
+ * Looks up sub-tensors in the input tensor.
+ *
+ * This operator takes as input a tensor of values (Values) and
+ * a one-dimensional tensor of selection indices (Lookups).
+ * The output tensor is the concatenation of sub-tensors of Values as
+ * selected by Lookups.
+ *
+ * Think of Values as being sliced along its first dimension:
+ * The entries in Lookups select which slices are concatenated together
+ * to create the output tensor.
+ *
+ * For example, if Values has shape of [40, 200, 300] and
+ * Lookups has shape of [3], all three values found in Lookups are
+ * expected to be between 0 and 39. The resulting tensor must
+ * have shape of [3, 200, 300].
+ *
+ * If a value in Lookups is out of bounds, the operation must fail
+ * and an error must be reported.
+ *
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
+ * The values are indices into the first dimension of Values.
+ * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+ * extracted.
+ *
+ * Output:
+ * * 0: A n-D tensor with the same rank and shape as the Values
+ * tensor, except for the first dimension which has the same size
+ * as Lookups' only dimension.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input1.
+ */
+ EMBEDDING_LOOKUP = @1.2::OperationType:EMBEDDING_LOOKUP,
+
+ /**
+ * Computes element-wise floor() on the input tensor.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor, of the same {@link OperandType} and dimensions as
+ * the input tensor.
+ */
+ FLOOR = @1.2::OperationType:FLOOR,
+
+ /**
+ * Denotes a fully (densely) connected layer, which connects all elements
+ * in the input tensor with each element in the output tensor.
+ *
+ * This layer implements the operation:
+ *
+ * outputs = activation(inputs * weights’ + bias)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor of at least rank 2, specifying the input. If rank is
+ * greater than 2, then it gets flattened to a 2-D Tensor. The
+ * (flattened) 2-D Tensor is reshaped (if necessary) to
+ * [batch_size, input_size], where "input_size" corresponds to the
+ * number of inputs to the layer, matching the second dimension of
+ * weights, and "batch_size" is calculated by dividing the number of
+ * elements by "input_size".
+ * Since HAL version 1.2, zero batch_size is supported for this tensor.
+ * * 1: A 2-D tensor, specifying the weights, of shape
+ * [num_units, input_size], where "num_units" corresponds to the number
+ * of output nodes.
+ * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
+ * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
+ */
+ FULLY_CONNECTED = @1.2::OperationType:FULLY_CONNECTED,
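+
+ /*
+ * Worked example of the flattening rule above: an input of shape
+ * [2, 3, 4] (24 elements) with weights of shape [5, 12] is reshaped to
+ * [batch_size, input_size] = [24 / 12, 12] = [2, 12], producing an output
+ * of shape [2, 5].
+ */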
+
+ /**
+ * Looks up sub-tensors in the input tensor using a key-value map.
+ *
+ * This operator takes as input a tensor of values (Values),
+ * a one-dimensional tensor of selection values (Lookups) and
+ * a one-dimensional tensor that maps these values to Values
+ * indexes. The output tensor is the concatenation of sub-tensors of
+ * Values as selected by Lookups via Keys.
+ *
+ * Think of Values as being sliced along its outer-most dimension.
+ * The output is a concatenation of selected slices, with one slice
+ * for each entry of Lookups. The slice selected is the one at the
+ * same index as the Maps entry that matches the value in Lookups.
+ *
+ * For a hit, the corresponding sub-tensor of Values is included
+ * in the Output tensor. For a miss, the corresponding sub-tensor in
+ * Output must have zero values.
+ *
+ * For example, if Values has shape of [40, 200, 300],
+ * Keys should have a shape of [40]. If Lookups tensor has shape
+ * of [3], three slices are being concatenated, so the resulting tensor
+ * must have the shape of [3, 200, 300]. If the first entry in Lookups
+ * has the value 123456, that value must be located in Keys tensor.
+ * If the sixth entry of Keys contains 123456, the sixth slice of Values
+ * must be selected. If no entry in Keys has 123456, a slice of zeroes
+ * must be concatenated.
+ *
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
+ * shape [ k ].
+ * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
+ * [ n ]; Keys and Values pair represent a map, i.e., the ith element
+ * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
+ * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
+ * ascending order.
+ * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
+ * must be n.
+ *
+ * Outputs:
+ * * 0: Output. A tensor with shape [ k …].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input2.
+ * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
+ * hits (True) or not (False).
+ * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
+ * and scale 1.0f.
+ * A non-zero byte represents True, a hit. A zero indicates otherwise.
+ */
+ HASHTABLE_LOOKUP = @1.2::OperationType:HASHTABLE_LOOKUP,
+
+ /**
+ * Applies L2 normalization along the depth dimension.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[batch, row, col, channel] =
+ * input[batch, row, col, channel] /
+ * sqrt(sum_{c} pow(input[batch, row, col, c], 2))
+ *
+ * For input tensor with rank less than 4, independently normalizes each
+ * 1-D slice along dimension dim.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension normalization would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ */
+ L2_NORMALIZATION = @1.2::OperationType:L2_NORMALIZATION,
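+
+ /*
+ * Worked example: a depth slice with values [3, 4] normalizes to
+ * [3 / 5, 4 / 5] = [0.6, 0.8], since sqrt(3^2 + 4^2) = 5.
+ */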
+
+ /**
+ * Performs a 2-D L2 pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, c] =
+ * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
+ * sum(1))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ */
+ L2_POOL_2D = @1.2::OperationType:L2_POOL_2D,
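+
+ /*
+ * Worked example of the formula above: a 1x2 window containing the values
+ * {3, 4} pools to sqrt((9 + 16) / 2) = sqrt(12.5), approximately 3.54.
+ */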
+
+ /**
+ * Applies Local Response Normalization along the depth dimension.
+ *
+ * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
+ * last dimension), and each vector is normalized independently. Within a
+ * given vector, each component is divided by the weighted, squared sum of
+ * inputs within depth_radius.
+ *
+ * The output is calculated using this formula:
+ *
+ * sqr_sum[a, b, c, d] = sum(
+ * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
+ * output = input / pow((bias + alpha * sqr_sum), beta)
+ *
+ * For input tensor with rank less than 4, independently normalizes each
+ * 1-D slice along specified dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
+ * the normalization window.
+ * * 2: A scalar, specifying the bias, must not be zero.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 3: A scalar, specifying the scale factor, alpha.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * alpha value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * alpha value must be of {@link OperandType::FLOAT32}.
+ * * 4: A scalar, specifying the exponent, beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 5: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension normalization would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOCAL_RESPONSE_NORMALIZATION = @1.2::OperationType:LOCAL_RESPONSE_NORMALIZATION,
+
+ /**
+ * Computes sigmoid activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = 1 / (1 + exp(-input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ */
+ LOGISTIC = @1.2::OperationType:LOGISTIC,
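+
+ /*
+ * Worked example of the output quantization above: with scale 1.f / 256
+ * and zeroPoint 0, sigmoid(0) = 0.5 is stored as the quantized value
+ * 0.5 * 256 = 128.
+ */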
+
+ /**
+ * Projects an input to a bit vector via locality-sensitive hashing.
+ *
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported input tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: Hash functions. Dim.size == 2, DataType: Float.
+ * Tensor[0].Dim[0]: Number of hash functions.
+ * Tensor[0].Dim[1]: Number of projected output bits generated by each
+ * hash function.
+ * If the projection type is Sparse:
+ * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
+ *
+ * * 1: Input. Dim.size >= 1, no restriction on DataType.
+ * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+ * If not set, each input element is considered to have the same weight
+ * of 1.0.
+ * Tensor[1].Dim[0] == Tensor[2].Dim[0]
+ * * 3: Type:
+ * Sparse:
+ * Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
+ * Computed bit vector is considered to be sparse.
+ * Each output element is an int32 made up of multiple bits
+ * computed from hash functions.
+ *
+ * NOTE: To avoid collisions across hash functions, an offset value
+ * of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
+ * where k is the index of the hash function.
+ *
+ * Value LSHProjectionType_SPARSE_DEPRECATED(=1).
+ * Legacy behavior that does not include the offset value.
+ *
+ * Dense:
+ * Value LSHProjectionType_DENSE(=2).
+ * Computed bit vector is considered to be dense. Each output
+ * element represents a bit and can take the value of either
+ * 0 or 1.
+ *
+ * Outputs:
+ * * 0: If the projection type is Sparse:
+ * Output.Dim == { Tensor[0].Dim[0] }
+ * A tensor of int32 that represents hash signatures.
+ *
+ * If the projection type is Dense:
+ * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+ * A flattened tensor that represents projected bit vectors.
+ * The offset value for sparse projections was added in HAL version 1.2.
+ */
+ LSH_PROJECTION = @1.2::OperationType:LSH_PROJECTION,
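+
+ /*
+ * Worked example of the sparse offset above: with Tensor[0].Dim[1] = 4
+ * projected bits per hash function, hash function k = 2 adds an offset of
+ * 2 * (1 << 4) = 32 to its signature.
+ */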
+
+ /**
+ * Performs a single time step in a Long Short-Term Memory (LSTM) layer
+ *
+ * The LSTM operation is described by the following equations.
+ *
+ * \f{eqnarray*}{
+ * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
+ * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
+ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
+ * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
+ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
+ * & & \\
+ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
+ * & if\ there\ is\ a\ projection; \\
+ * h_t =& & \\
+ * & o_t \odot g(C_t) & otherwise. \\
+ * \f}
+ * Where:
+ * * \f$x_t\f$ is the input,
+ * * \f$i_t\f$ is the input gate,
+ * * \f$f_t\f$ is the forget gate,
+ * * \f$C_t\f$ is the cell state,
+ * * \f$o_t\f$ is the output,
+ * * \f$h_t\f$ is the output state,
+ * * \f$\sigma\f$ is the logistic sigmoid function,
+ * * \f$g\f$ is the cell input and cell output activation function, usually
+ * \f$tanh\f$,
+ * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+ * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
+ * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+ * * \f$b_i\f$ is the input gate bias,
+ * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+ * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+ * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+ * * \f$b_f\f$ is the forget gate bias,
+ * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+ * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+ * * \f$b_c\f$ is the cell bias,
+ * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+ * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+ * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+ * * \f$b_o\f$ is the output gate bias,
+ * * \f$W_{proj}\f$ is the projection weight matrix,
+ * * \f$b_{proj}\f$ is the projection bias,
+ * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+ * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+ * * \f$\odot\f$ is the
+ * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+ * Hadamard product</a> that takes two matrices and produces another
+ * matrix, each element of which is the product of the corresponding
+ * elements of the input matrices.
+ *
+ * Since HAL version 1.2 LSTM supports layer normalization.
+ * In case layer normalization is used, the inputs to internal activation
+ * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
+ * following the approach in section 3.1 of
+ * https://arxiv.org/pdf/1607.06450.pdf
+ *
+ * The operation has the following independently optional inputs:
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+ * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+ * have values or neither of them have values (i.e., all set to null). If
+ * they have values, the peephole optimization is used.
+ * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
+ * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values. If they have no values, coupling of input
+ * and forget gates (CIFG) is used, in which case the input gate
+ * (\f$i_t\f$) is calculated using the following equation instead.
+ * \f{eqnarray*}{
+ * i_t = 1 - f_t
+ * \f}
+ * In case peephole optimization is used and CIFG is not used
+ * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+ * cell-to-input weights must have no value.
+ * * The projection weights (\f$W_{proj}\f$) are required only for the
+ * recurrent projection layer, and should otherwise have no value.
+ * * The projection bias (\f$b_{proj}\f$) may (but is not required to) have a
+ * value if the recurrent projection layer exists, and should otherwise
+ * have no value.
+ * * (HAL version 1.2 or later) The four layer normalization weights either all have
+ * values or none of them have values. Additionally, if CIFG is used,
+ * input layer normalization weights tensor is omitted and the other layer
+ * normalization weights either all have values or none of them have
+ * values. Layer normalization is used when the values of all the layer
+ * normalization weights are present.
+ *
+ * References:
+ *
+ * The default non-peephole non-CIFG implementation is based on:
+ * http://www.bioinf.jku.at/publications/older/2604.pdf
+ * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
+ * Computation, 9(8):1735-1780, 1997.
+ *
+ * The peephole implementation and projection layer is based on:
+ * https://research.google.com/pubs/archive/43905.pdf
+ * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
+ * recurrent neural network architectures for large scale acoustic
+ * modeling." INTERSPEECH, 2014.
+ * (However, the concept of peephole optimization was introduced in work
+ * prior to this paper.)
+ *
+ * The coupling of input and forget gate (CIFG) is based on:
+ * http://arxiv.org/pdf/1503.04069.pdf
+ * Greff et al. "LSTM: A Search Space Odyssey"
+ *
+ * The layer normalization is based on:
+ * https://arxiv.org/pdf/1607.06450.pdf
+ * Jimmy Ba et al. "Layer Normalization"
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+ * tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+ * tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * Since HAL version 1.2 there are additional inputs to this op:
+ * * 23:The input layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 24:The forget layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 25:The cell layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 26:The output layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The scratch buffer.
+ * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
+ * [batch_size, num_units * 4] without CIFG.
+ * * 1: The output state (out) (\f$h_t\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 2: The cell state (out) (\f$C_t\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 3: The output (\f$o_t\f$).
+ * A 2-D tensor of shape [batch_size, output_size]. This is effectively
+ * the same as the current “output state (out)” value.
+ */
+ LSTM = @1.2::OperationType:LSTM,
+
+ /**
+ * Performs a 2-D max pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * max_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * )
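+ *
+ * For example (illustrative only), with a 2x2 filter, stride 2, and no
+ * padding, a single-channel 4x4 input reduces to a 2x2 output:
+ *
+ *     input  = [[1, 3, 2, 4],
+ *               [5, 7, 6, 8],
+ *               [9, 2, 1, 3],
+ *               [4, 6, 5, 7]]
+ *     output = [[max(1,3,5,7), max(2,4,6,8)],
+ *               [max(9,2,4,6), max(1,3,5,7)]]
+ *            = [[7, 8],
+ *               [9, 7]]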
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, with the data storage order: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, which must be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, which must be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ MAX_POOL_2D = @1.2::OperationType:MAX_POOL_2D,
+
+ /**
+ * Multiplies two tensors, element-wise.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the product of both input tensors, optionally
+ * modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the resulting output is the maximum size along each dimension
+ * of the input operands. It starts with the trailing dimensions, and works
+ * its way forward.
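+ *
+ * Example:
+ *     input1.dimension = {4, 1, 2}
+ *     input2.dimension = {5, 4, 3, 1}
+ *     output.dimension = {5, 4, 3, 2}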
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of an output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, which must be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The product, a tensor of the same {@link OperandType} as input0.
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input1_scale * input2_scale.
+ */
+ MUL = @1.2::OperationType:MUL,
+
+ /**
+ * Computes rectified linear activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = max(0, input)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU = @1.2::OperationType:RELU,
+
+ /**
+ * Computes rectified linear 1 activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = min(1.f, max(-1.f, input))
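+ *
+ * For example, an input of [-2.0, -0.5, 0.3, 5.0] produces
+ * [-1.0, -0.5, 0.3, 1.0].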
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of the same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU1 = @1.2::OperationType:RELU1,
+
+ /**
+ * Computes rectified linear 6 activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = min(6, max(0, input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU6 = @1.2::OperationType:RELU6,
+
+ /**
+ * Reshapes a tensor.
+ *
+ * Given a tensor, this operation returns a tensor that has the same values
+ * as the input tensor, but with a newly specified shape.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the tensor to be reshaped.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
+ * shape of the output tensor. The number of elements implied by shape
+ * must be the same as the number of elements in the input tensor.
+ *
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component
+ * of shape can be -1.
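+ *
+ * For example, reshaping a [2, 3, 4] input (24 elements) with shape
+ * [4, -1] produces a [4, 6] output, since 24 / 4 = 6.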
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape specified by the input shape.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESHAPE = @1.2::OperationType:RESHAPE,
+
+ /**
+ * Resizes images to the given size using bilinear interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio differs
+ * from the input aspect ratio. The corner pixels of the output may not be
+ * the same as the corner pixels of the input.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, with the data storage order: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the width of the
+ * output tensor.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the height of the
+ * output tensor.
+ * * 3: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (resizing by scale, since HAL version 1.2):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
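+ *
+ * For example, with the resizing-by-scale inputs above, width_scale = 0.5
+ * and height_scale = 0.5 resize a [1, 224, 224, 3] input to
+ * [1, 112, 112, 3], since floor(224 * 0.5) = 112.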
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESIZE_BILINEAR = @1.2::OperationType:RESIZE_BILINEAR,
+
+ /**
+ * A basic recurrent neural network layer.
+ *
+ * This layer implements the operation:
+ * outputs = state = activation(inputs * input_weights +
+ * state * recurrent_weights + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
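+ *
+ * Illustrative shape check (assuming, as in common implementations, that
+ * the weight matrices are applied transposed): [batch_size, input_size]
+ * times transpose([num_units, input_size]) gives [batch_size, num_units];
+ * the recurrent term likewise gives [batch_size, num_units]; the bias
+ * [num_units] broadcasts over the batch.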
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: weights.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [num_units, num_units], with columns
+ * corresponding to the weights from each unit.
+ * * 3: bias.
+ * A 1-D tensor of shape [num_units].
+ * * 4: hidden state (in).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 5: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
+ *
+ * Outputs:
+ * * 0: hidden state (out).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 1: output.
+ * A 2-D tensor of shape [batch_size, num_units]. This is effectively
+ * the same as the current state value.
+ */
+ RNN = @1.2::OperationType:RNN,
+
+ /**
+ * Computes the softmax activation on the input tensor element-wise, per
+ * batch, by normalizing the input vector so the maximum coefficient is
+ * zero.
+ *
+ * The output is calculated using this formula:
+ *
+ * output[batch, i] =
+ * exp((input[batch, i] - max(input[batch, :])) * beta) /
+ * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
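+ *
+ * For example, with beta = 1, an input slice of [1, 2, 3] produces
+ * approximately [0.090, 0.245, 0.665], which sums to 1.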
+ *
+ * For an input tensor with rank other than 2, the activation is applied
+ * independently to each 1-D slice along the specified dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 2-D or 4-D tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
+ * {@link OperandType::FLOAT32}.
+ * If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
+ * scalar must be of {@link OperandType::FLOAT16}.
+ * * 2: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension the activation would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ */
+ SOFTMAX = @1.2::OperationType:SOFTMAX,
+
+ /**
+ * Rearranges blocks of spatial data into depth.
+ *
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the height and width dimensions are moved to the depth
+ * dimension. The value block_size indicates the input block size and how
+ * the data is moved.
+ *
+ * Non-overlapping blocks of size block_size x block_size from the height
+ * and width dimensions are rearranged into chunks of depth of size
+ * block_size * block_size.
+ *
+ * The depth of the output tensor is input_depth * block_size * block_size.
+ * The input tensor's height and width must be divisible by block_size.
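+ *
+ * For example, a [1, 4, 4, 1] input with block_size 2 produces a
+ * [1, 2, 2, 4] output.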
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, with the data storage order: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size must be a divisor of both the
+ * input height and width.
+ * * 2: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, height/block_size,
+ * width/block_size, depth_in*block_size*block_size].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SPACE_TO_DEPTH = @1.2::OperationType:SPACE_TO_DEPTH,
+
+ /**
+ * The SVDF op is a stateful layer derived from the notion that a
+ * densely connected layer that's processing a sequence of input frames can
+ * be approximated by using a singular value decomposition of each of its
+ * nodes. The implementation is based on:
+ *
+ * https://research.google.com/pubs/archive/43813.pdf
+ *
+ * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
+ * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
+ * INTERSPEECH, 2015.
+ *
+ * It processes the incoming input using a 2-stage filtering mechanism:
+ * * stage 1 performs filtering on the "features" dimension, whose outputs
+ * get pushed into a fixed-size memory of size memory_size.
+ * * stage 2 performs filtering on the "time" dimension of the memory_size
+ * memoized outputs of stage 1.
+ *
+ * Specifically, for rank 1, this layer implements the operation:
+ *
+ * memory = push(conv1d(inputs, weights_feature, feature_dim,
+ * "PADDING_VALID"));
+ * outputs = activation(memory * weights_time + bias);
+ *
+ * Where:
+ * * “weights_feature” is a weights matrix that processes the inputs (by
+ * convolving the input with every “feature filter”), and whose outputs
+ * get pushed, stacked in order, into the fixed-size “memory” (the oldest
+ * entry gets dropped);
+ * * “weights_time” is a weights matrix that processes the “memory” (by a
+ * batched matrix multiplication on the num_units);
+ * * “bias” is an optional bias vector (added to each output vector in the
+ * batch); and
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Each rank adds a dimension to the weights matrices by means of stacking
+ * the filters.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * All input tensors must be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: weights_feature.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
+ * * 2: weights_time.
+ * A 2-D tensor of shape [num_units, memory_size], where “memory_size”
+ * corresponds to the fixed-size of the memory.
+ * * 3: bias.
+ * An optional 1-D tensor of shape [num_units].
+ * * 4: state (in).
+ * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
+ * * 5: rank.
+ * The rank of the SVD approximation.
+ * * 6: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
+ *
+ * Outputs:
+ * * 0: state (out).
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+ * [batch_size, (memory_size - 1) * num_units * rank].
+ * * 1: output.
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+ * [batch_size, num_units].
+ */
+ SVDF = @1.2::OperationType:SVDF,
+
+ /**
+ * Computes hyperbolic tangent of input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = tanh(input)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ */
+ TANH = @1.2::OperationType:TANH,
+
+ /**
+ * BatchToSpace for N-dimensional tensors.
+ *
+ * This operation reshapes the batch dimension (dimension 0) into M + 1
+ * dimensions of shape block_shape + [batch], interleaves these blocks back
+ * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
+ * result with the same rank as the input.
+ *
+ * This is the reverse of SpaceToBatch.
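+ *
+ * For example, a [4, 1, 1, 1] input with block sizes [2, 2] produces a
+ * [1, 2, 2, 1] output.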
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, with the data storage order: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ BATCH_TO_SPACE_ND = @1.2::OperationType:BATCH_TO_SPACE_ND,
+
+ /**
+ * Element-wise division of two tensors.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the result of dividing the first input tensor
+ * by the second, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of an output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the first input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, which must be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ DIV = @1.2::OperationType:DIV,
+
+ /**
+ * Computes the mean of elements across dimensions of a tensor.
+ *
+ * Reduces the input tensor along the given dimensions. Unless keep_dims
+ * is true, the rank of the tensor is reduced by 1 for each entry in axis.
+ * If keep_dims is true, the reduced dimensions are retained with length 1.
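+ *
+ * For example, a [2, 3, 4] input reduced along axis 1 produces a [2, 4]
+ * output, or a [2, 1, 4] output if keep_dims is positive.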
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Must be in the range
+ * [-rank(input_tensor), rank(input_tensor)).
+ *
+ * NOTE: When the operation was introduced, the documentation
+ * incorrectly stated that if dimensions were empty, the operation
+ * would reduce across all dimensions. This behavior was never
+ * implemented.
+ *
+ * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ MEAN = @1.2::OperationType:MEAN,
+
+ /**
+ * Pads a tensor.
+ *
+ * This operation pads a tensor according to the specified paddings.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
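+ *
+ * For example, a [2, 3] input with the paddings above set to
+ * [[1, 0], [0, 2]] produces a [3, 5] output: 1 + 2 + 0 = 3 and
+ * 0 + 3 + 2 = 5.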
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
+ */
+ PAD = @1.2::OperationType:PAD,
+
+ /**
+ * SpaceToBatch for N-Dimensional tensors.
+ *
+ * This operation divides "spatial" dimensions [1, ..., M] of the input into
+ * a grid of blocks of shape block_shape, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial dimensions
+ * [1, ..., M] correspond to the position within the grid, and the batch
+ * dimension combines both the position within a spatial block and the
+ * original batch position. Prior to division into blocks, the spatial
+ * dimensions of the input are optionally zero padded according to paddings.
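+ *
+ * For example, a [1, 4, 4, 1] input with block sizes [2, 2] and no padding
+ * produces a [4, 2, 2, 1] output.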
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, with the data storage order: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. All values must be
+ * >= 0. The shape of the tensor must be {M, 2}, where M is the number
+ * of spatial dimensions.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
+ * * 3: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
+ */
+ SPACE_TO_BATCH_ND = @1.2::OperationType:SPACE_TO_BATCH_ND,
+
+ /**
+ * Removes dimensions of size 1 from the shape of a tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of the same
+ * {@link OperandType} with all dimensions of size 1 removed. If you don't
+ * want to remove all size 1 dimensions, you can remove specific size 1
+ * dimensions by specifying the axes (input1).
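+ *
+ * For example, a [1, 3, 1, 4] input squeezes to [3, 4] when no axes are
+ * given, and to [3, 1, 4] when axes = [0].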
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, the tensor to be squeezed.
+ * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * dimensions to squeeze. If specified, only squeezes the dimensions
+ * listed. Otherwise, squeezes all dimensions. The dimension index
+ * starts at 0. An error must be reported if squeezing a dimension that
+ * is not 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. Contains the
+ * same data as input, but has one or more dimensions of size 1
+ * removed.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SQUEEZE = @1.2::OperationType:SQUEEZE,
+
+ /**
+ * Extracts a strided slice of a tensor.
+ *
+ * Roughly speaking, this op extracts a slice of size (end - begin) / stride
+ * from the given input tensor. Starting at the location specified by begin,
+ * the slice continues by adding stride to the index until the index in each
+ * dimension is no longer less than end. Note that a stride can be negative,
+ * which causes a reverse slice.
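+ *
+ * For example, slicing a [5] tensor [1, 2, 3, 4, 5] with begin = [1],
+ * end = [4], strides = [2], and no masks selects indices 1 and 3,
+ * producing [2, 4].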
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be sliced.
+ * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * starts of the dimensions of the input tensor to be sliced. The
+ * length must be equal to rank(input0).
+ * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * ends of the dimensions of the input tensor to be sliced. The length
+ * must be equal to rank(input0).
+ * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * strides of the dimensions of the input tensor to be sliced. The
+ * length must be equal to rank(input0). The entries must be non-zero.
+ * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
+ * of begin_mask is set, begin[i] is ignored and the fullest possible
+ * range in that dimension is used instead.
+ * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
+ * end_mask is set, end[i] is ignored and the fullest possible range in
+ * that dimension is used instead.
+ * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
+ * ith bit of shrink_axis_mask is set, the ith dimension specification
+ * shrinks the dimensionality by 1, taking on the value at index
+ * begin[i]. In this case, the ith specification must define a
+ * slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
+ * where k is the number of bits set in shrink_axis_mask.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ STRIDED_SLICE = @1.2::OperationType:STRIDED_SLICE,
+
+ /**
+ * Element-wise subtraction of two tensors.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the result of subtracting the second input
+ * tensor from the first one, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+ * zero dimension is only compatible with 0 or 1. The size of an output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the first input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, which must be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ SUB = @1.2::OperationType:SUB,
+
+ /**
+ * Transposes the input tensor, permuting the dimensions according to the
+ * perm tensor.
+ *
+ * The returned tensor's dimension i corresponds to the input dimension
+ * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
+ * rank of the input tensor. Hence by default, this operation performs a
+ * regular matrix transpose on 2-D input Tensors.
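+ *
+ * For example, a [2, 3, 4] input with perm = [2, 0, 1] produces a
+ * [4, 2, 3] output, since output dimension i has the size of input
+ * dimension perm[i].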
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be transposed.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
+ * the permutation of the dimensions of the input tensor.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ TRANSPOSE = @1.2::OperationType:TRANSPOSE,
+
+ /**
+ * Computes the absolute value of a tensor, element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ ABS = @1.2::OperationType:ABS,
+
+ /**
+ * Returns the index of the largest element along an axis.
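+ *
+ * For example, a [2, 2] input [[1, 4], [3, 2]] with axis = 1 produces
+ * the [2] output [1, 0].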
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ */
+ // There is no underscore in ARG_MAX to avoid name conflict with
+ // the macro defined in libc/kernel/uapi/linux/limits.h.
+ ARGMAX = @1.2::OperationType:ARGMAX,
+
+ /**
+ * Returns the index of the smallest element along an axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ */
+ ARGMIN = @1.2::OperationType:ARGMIN, // See ARGMAX for naming discussion.
+
+ /**
+ * Transform axis-aligned bounding box proposals using bounding box deltas.
+ *
+ * Given the positions of bounding box proposals and the corresponding
+ * bounding box deltas for each class, return the refined bounding box
+ * regions. The resulting bounding boxes are clipped against the edges of
+ * the image.
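+ *
+ * One common decoding consistent with the delta format described below is
+ * (shown for illustration only; the input and output descriptions below
+ * are authoritative):
+ *
+ *     w = x2 - x1, h = y2 - y1
+ *     ctr_x = x1 + 0.5 * w, ctr_y = y1 + 0.5 * h
+ *     new_ctr_x = ctr_x + dx * w, new_ctr_y = ctr_y + dy * h
+ *     new_w = w * exp(dw), new_h = h * exp(dh)
+ *
+ * after which the center/size form is converted back to corners and
+ * clipped to the image boundary.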
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT16_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
+ * bounding box proposals, each line with format [x1, y1, x2, y2].
+ * For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+ * is supported for this tensor.
+ * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
+ * bounding box delta for each region of interest and each class. The
+ * bounding box deltas are organized in the following order
+ * [dx, dy, dw, dh], where dx and dy are the relative correction factors
+ * for the center position of the bounding box with respect to the width
+ * and height, and dw and dh are the log-scale relative correction factors
+ * for the width and height. For input0 of type
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * supported for this tensor.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
+ * each image in the batch, each line with format
+ * [image_height, image_width].
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, with shape
+ * [num_rois, num_classes * 4], specifying the coordinates of each
+ * output bounding box for each class, with format [x1, y1, x2, y2].
+ * For a {@link OperandType::TENSOR_QUANT16_ASYMM} tensor, the
+ * scale must be 0.125 and the zeroPoint must be 0.
+ */
+ AXIS_ALIGNED_BBOX_TRANSFORM = @1.2::OperationType:AXIS_ALIGNED_BBOX_TRANSFORM,
+
+ /**
+ * Performs a forward LSTM on the input followed by a backward LSTM.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where "max_time" is the number of timesteps (sequence length),
+ * "batch_size" corresponds to the batching dimension, and
+ * "input_size" is the size of the input.
+ * * 1: The forward input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+ * corresponds to the number of forward cell units.
+ * * 2: The forward input-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 3: The forward input-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 4: The forward input-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 5: The forward recurrent-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+ * corresponds to either the number of cell units (i.e., fw_num_units),
+ * or the second dimension of the “fw_projection_weights”, if defined.
+ * * 6: The forward recurrent-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 7: The forward recurrent-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 8: The forward recurrent-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 9: The forward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 10: The forward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 11: The forward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 12: The forward input gate bias. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 13: The forward forget gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 14: The forward cell gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 15: The forward output gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 16: The forward projection weights. Optional.
+ * A 2-D tensor of shape [fw_output_size, fw_num_units].
+ * * 17: The forward projection bias. Optional.
+ * A 1-D tensor of shape [fw_output_size].
+ * * 18: The backward input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+ * corresponds to the number of backward cell units.
+ * * 19: The backward input-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 20: The backward input-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 21: The backward input-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 22: The backward recurrent-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+ * corresponds to either the number of cell units (i.e., “bw_num_units”),
+ * or the second dimension of the “bw_projection_weights”, if defined.
+ * * 23: The backward recurrent-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 24: The backward recurrent-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 25: The backward recurrent-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 26: The backward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 27: The backward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 28: The backward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 29: The backward input gate bias. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 30: The backward forget gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 31: The backward cell gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 32: The backward output gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 33: The backward projection weights. Optional.
+ * A 2-D tensor of shape [bw_output_size, bw_num_units].
+ * * 34: The backward projection bias. Optional.
+ * A 1-D tensor of shape [bw_output_size].
+ * * 35: The forward input activation state.
+ * A 2-D tensor of shape [batch_size, fw_output_size].
+ * * 36: The forward input cell state.
+ * A 2-D tensor of shape [batch_size, fw_num_units].
+ * * 37: The backward input activation state.
+ * A 2-D tensor of shape [batch_size, bw_output_size].
+ * * 38: The backward input cell state.
+ * A 2-D tensor of shape [batch_size, bw_num_units].
+ * * 39: The auxiliary input. Optional.
+ * A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 40: The forward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 41: The forward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 42: The forward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 43: The forward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 44: The backward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 45: The backward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 46: The backward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 47: The backward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 48: The activation function.
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 49: The clipping threshold for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+ * this scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
+ * this scalar must be of type {@link OperandType::FLOAT16}.
+ * * 50: The clipping threshold for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+ * this scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
+ * this scalar must be of type {@link OperandType::FLOAT16}.
+ * * 51: merge_outputs
+ * An {@link OperandType::BOOL} scalar specifying if the outputs
+ * from forward and backward cells should be merged.
+ * * 52: time_major
+ * An {@link OperandType::BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 53: The forward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 54: The forward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 55: The forward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 56: The forward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ * * 57: The backward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 58: The backward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 59: The backward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 60: The backward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The forward output.
+ * A 3-D tensor of shape:
+ * If time-major and not merge_outputs:
+ * [max_time, batch_size, fw_output_size]
+ * If time-major and merge_outputs:
+ * [max_time, batch_size, fw_output_size + bw_output_size]
+ * If batch-major and not merge_outputs:
+ * [batch_size, max_time, fw_output_size]
+ * If batch-major and merge_outputs:
+ * [batch_size, max_time, fw_output_size + bw_output_size]
+ * * 1: The backward output. Unused if merge_outputs is true.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, bw_output_size]
+ * If batch-major: [batch_size, max_time, bw_output_size]
+ */
+ BIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_LSTM,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs in forward and backward directions.
+ *
+ * This Op unrolls the input along the sequence dimension, and implements
+ * the following operation for each element in the sequence s =
+ * 1...sequence_length:
+ * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
+ * fw_state * fw_recurrent_weights’ + fw_bias)
+ *
+ * And for each element in sequence t = sequence_length : 1
+ * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
+ * bw_state * bw_recurrent_weights’ + bw_bias)
+ *
+ * Where:
+ * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
+ * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
+ * current “state” which itself is the output from the previous time step
+ * computation;
+ * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
+ * batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * The op also supports an auxiliary input. A regular cell feeds one input
+ * into the two RNN cells in the following way:
+ *
+ * INPUT (INPUT_REVERSED)
+ * | |
+ * ---------------------
+ * | FW_RNN BW_RNN |
+ * ---------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * An op with an auxiliary input takes two inputs and feeds them into the
+ * RNN cells in the following way:
+ *
+ * AUX_INPUT (AUX_INPUT_REVERSED)
+ * | |
+ * INPUT | (INPUT_R'D.)|
+ * | | | |
+ * -----------------------
+ * | \ / \ / |
+ * | FW_RNN BW_RNN |
+ * -----------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * When stacking this op on top of itself, this allows connecting both the
+ * forward and backward outputs from the previous cell to the next cell's
+ * inputs.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
+ * it is set to true, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: fwWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 2: fwRecurrentWeights.
+ * A 2-D tensor of shape [fwNumUnits, fwNumUnits].
+ * * 3: fwBias.
+ * A 1-D tensor of shape [fwNumUnits].
+ * * 4: fwHiddenState.
+ * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: bwWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 6: bwRecurrentWeights.
+ * A 2-D tensor of shape [bwNumUnits, bwNumUnits].
+ * * 7: bwBias.
+ * A 1-D tensor of shape [bwNumUnits].
+ * * 8: bwHiddenState
+ * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 9: auxInput.
+ * A 3-D tensor. The shape is the same as that of the input 0.
+     * * 10: fwAuxWeights.
+     *      A 2-D tensor of shape [fwNumUnits, inputSize].
+     * * 11: bwAuxWeights.
+     *      A 2-D tensor of shape [bwNumUnits, inputSize].
+     * * 12: fusedActivationFunction.
+     *      A {@link FusedActivationFunc} value indicating the activation function. If
+     *      “NONE” is specified then it results in a linear activation.
+     * * 13: timeMajor
+     *      An {@link OperandType::BOOL} scalar specifying the shape format
+     *      of input and output tensors.
+     * * 14: mergeOutputs
+     *      An {@link OperandType::BOOL} scalar specifying if the outputs
+     *      from forward and backward cells are separate (if set to false) or
+     *      concatenated (if set to true).
+     *
+ * Outputs:
+ * * 0: fwOutput.
+ * A 3-D tensor. The first two dimensions of the shape are defined by
+     *      the input 13 (timeMajor) and the third dimension is defined by the
+ * input 14 (mergeOutputs). If timeMajor is set to true, then the first
+ * two dimensions are [maxTime, batchSize], otherwise they are set to
+ * [batchSize, maxTime]. If mergeOutputs is set to true, then the third
+ * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
+ * to fwNumUnits.
+ * * 1: bwOutput.
+ * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
+     *      this tensor is not produced. The shape is defined by the input 13
+ * (timeMajor). If it is set to true, then the shape is set to
+ * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
+ * [batchSize, maxTime, bwNumUnits].
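+     *
+     * As an illustration with hypothetical sizes: for timeMajor set to
+     * true, mergeOutputs set to true, maxTime = 5, batchSize = 2,
+     * fwNumUnits = 16 and bwNumUnits = 16, fwOutput has shape [5, 2, 32]
+     * and bwOutput is not produced.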
+ */
+ BIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_RNN,
+
+ /**
+ * Greedily selects a subset of bounding boxes in descending order of score.
+ *
+     * This op applies the NMS algorithm to each class. In each loop of
+     * execution, the box with the maximum score gets selected and removed
+     * from the pending set. The scores of the remaining boxes are lowered
+     * according to their intersection-over-union (IOU) overlap with the
+     * previously selected boxes and the specified NMS kernel method. Any
+     * boxes with a score less than a threshold are removed from the
+     * pending set.
+ *
+ * Three NMS kernels are supported:
+ * * Hard: score_new = score_old * (1 if IoU < threshold else 0)
+ * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+ * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
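+     *
+     * For illustration (hypothetical values): with the Gaussian kernel,
+     * sigma = 0.5 and IoU = 0.5, score_new = score_old * exp(-0.25 / 0.5),
+     * i.e. roughly 0.61 * score_old.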
+ *
+     * Axis-aligned bounding boxes are represented by their upper-left corner
+     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+     * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
+ * of each bounding box proposal. The boxes are grouped by batches in the
+ * first dimension. Zero num_rois is supported for this tensor.
+ * * 1: A 2-D Tensor specifying the bounding boxes of shape
+ * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
+ * The boxes are grouped by batches in the first dimension. The sequential
+ * order of the boxes corresponds with input0. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
+ * scale of 0.125. Zero num_rois is supported for this tensor.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
+ * with scores lower than the threshold are filtered before sending
+ * to the NMS algorithm.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of selected bounding boxes for each image. Set to a negative
+ * value for unlimited number of output bounding boxes.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
+ * kernel method, options are 0:hard, 1:linear, 2:gaussian.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold in hard and linear NMS kernel. This field is ignored if
+ * gaussian kernel is selected.
+ * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
+ * gaussian NMS kernel. This field is ignored if gaussian kernel is
+ * not selected.
+ * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
+ * Boxes with scores lower than the threshold are dropped during the
+ * score updating phase in soft NMS.
+ *
+ * Outputs:
+ * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
+ * [num_output_rois], specifying the score of each output box. The boxes
+ * are grouped by batches, but the sequential order in each batch is not
+ * guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale and zero point must be the same as input0.
+ * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
+ * [num_output_rois, 4], specifying the coordinates of each
+ * output bounding box with the same format as input1. The sequential
+ * order of the boxes corresponds with output0. For type of
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
+ * 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the class of each output box. The
+ * sequential order of the boxes corresponds with output0.
+ * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ */
+ BOX_WITH_NMS_LIMIT = @1.2::OperationType:BOX_WITH_NMS_LIMIT,
+
+ /**
+ * Casts a tensor to a new type.
+ *
+     * This operation ignores the scale and zeroPoint of quantized tensors,
+ * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
+ * as a tensor of uint8 values.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ */
+ CAST = @1.2::OperationType:CAST,
+
+ /**
+     * Shuffles the channels of the input tensor.
+     *
+     * Given an input tensor and an integer value num_groups, CHANNEL_SHUFFLE
+     * divides the channel dimension into num_groups groups and reorganizes
+     * the channels by grouping channels with the same index in each group.
+ *
+ * Along the channel dimension, the output is calculated using this formula:
+ *
+ * output_channel[k * num_groups + g] = input_channel[g * group_size + k]
+ *
+ * where group_size = num_channels / num_groups
+ *
+ * The number of channels must be divisible by num_groups.
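+     *
+     * For example (hypothetical sizes): with num_channels = 6 and
+     * num_groups = 2 (so group_size = 3), input channels
+     * [c0, c1, c2, c3, c4, c5] are reordered to [c0, c3, c1, c4, c2, c5].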
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be shuffled.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the dimension
+ * channel shuffle would be performed on. Negative index is used to
+ * specify axis from the end (e.g. -1 for the last axis). Must be in
+ * the range [-n, n).
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ CHANNEL_SHUFFLE = @1.2::OperationType:CHANNEL_SHUFFLE,
+
+ /**
+ * Apply postprocessing steps to bounding box detections.
+ *
+ * Bounding box detections are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
+ * the score of each anchor with each class. Class 0 for each
+ * [batches, num_anchors, 0] is background and will be ignored.
+ * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
+ * the first four values in length_box_encoding specifying the bounding
+ * box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
+     *      where dy and dx are the linear-scale relative correction factors
+     *      for the center position of the bounding box with respect to the
+     *      width and height, and dh and dw are the log-scale relative
+     *      correction factors for the width and height. All the entries in
+     *      length_box_encoding beyond the first four
+ * values are ignored in this operation.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
+ * ctr_x are the center position of the box, and h and w are the height
+ * and the width.
+ * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dy in bounding box deltas.
+ * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dx in bounding box deltas.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dh in bounding box deltas.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dw in bounding box deltas.
+     * * 7: An {@link OperandType::BOOL} scalar, set to true to use the
+     *      regular multi-class NMS algorithm that does NMS separately for
+     *      each class, or set to false for a faster algorithm that only does
+     *      a single NMS using the highest class score.
+ * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying
+ * the maximum number of boxes for the output. Boxes with the lowest
+ * scores are discarded to meet the limit.
+ * * 9: An {@link OperandType::INT32} scalar, only used when input7 is
+ * set to false, specifying the maximum number of classes per detection.
+ * * 10: An {@link OperandType::INT32} scalar, only used when input7 is
+ * set to true, specifying the maximum number of detections when
+ * applying NMS algorithm for each single class.
+ * * 11: A scalar, score_threshold. Boxes with scores lower than the
+ * threshold are filtered before sending to the NMS algorithm. The
+ * scalar must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
+ * must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 13: An {@link OperandType::BOOL} scalar, set to true to include
+ * background class in the list of label map for the output, set
+ * to false to not include the background. When the background
+ * class is included, it has label 0 and the output classes start
+ * at 1 in the label map, otherwise, the output classes start at 0.
+ *
+ * Outputs:
+ * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape
+ * [batches, max_num_detections], specifying the score of each output
+     *      detection.
+ * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
+ * coordinates of each output bounding box, with format
+ * [y1, x1, y2, x2].
+ * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [batches, max_num_detections], specifying the class label for each
+ * output detection.
+     * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
+ * specifying the number of valid output detections for each batch.
+ */
+ DETECTION_POSTPROCESSING = @1.2::OperationType:DETECTION_POSTPROCESSING,
+
+ /**
+ * For input tensors x and y, computes x == y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
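+     *
+     * For example (hypothetical shapes): comparing an input0 of shape
+     * [2, 3] against an input1 of shape [3] broadcasts input1 across the
+     * first dimension and produces an output of shape [2, 3].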
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ EQUAL = @1.2::OperationType:EQUAL,
+
+ /**
+ * Computes exponential of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ EXP = @1.2::OperationType:EXP,
+
+ /**
+ * Inserts a dimension of 1 into a tensor's shape.
+ *
+ * Given a tensor input, this operation inserts a dimension of 1 at the
+ * given dimension index of input's shape. The dimension index starts at
+ * zero; if you specify a negative dimension index, it is counted backward
+ * from the end.
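+     *
+     * For example (hypothetical shape): expanding an input of shape [2, 3]
+     * at dimension index 1 gives shape [2, 1, 3], while index -1 gives
+     * shape [2, 3, 1].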
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: An {@link OperandType::INT32} scalar specifying the dimension
+ * index to expand. Must be in the range [-(n + 1), (n + 1)).
+ *
+ * Outputs:
+ * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
+ * input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ EXPAND_DIMS = @1.2::OperationType:EXPAND_DIMS,
+
+ /**
+ * Gathers values along an axis.
+ *
+ * Produces an output tensor with shape
+ * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
+ * where:
+ * # Vector indices (output is rank(input0)).
+ * output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+ *
+ * # Higher rank indices (output is rank(input0) + rank(indices) - 1).
+ * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+ * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
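+     *
+     * For example (hypothetical values): with input0 of shape [3, 4],
+     * axis = 0 and indices = [2, 0], the output has shape [2, 4], where
+     * output[0, :] = input0[2, :] and output[1, :] = input0[0, :].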
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor from which to gather values.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis.
+ * Negative index is used to specify axis from the end
+ * (e.g. -1 for the last axis). Must be in the range [-n, n).
+ * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
+ * The values must be in the bounds of the corresponding dimensions
+ * of input0.
+ *
+ * Outputs:
+ * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ GATHER = @1.2::OperationType:GATHER,
+
+ /**
+     * Generate axis-aligned bounding box proposals.
+ *
+ * Bounding box proposals are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+     * Axis-aligned bounding boxes are represented by their upper-left corner
+     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+     * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor specifying the score of each anchor at each
+ * location. With "NHWC" data layout, the tensor shape is
+ * [batches, height, width, num_anchors]. With "NCHW" data layout,
+ * the tensor shape is [batches, num_anchors, height, width].
+ * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
+ * layout, the tensor shape is [batches, height, width, num_anchors * 4].
+ * With "NCHW" data layout, the tensor shape is
+ * [batches, num_anchors * 4, height, width]. The box deltas are encoded
+     *      in the order of [dx, dy, dw, dh], where dx and dy are the
+     *      linear-scale relative correction factors for the center position
+     *      of the bounding box with respect to the width and height, and dw
+     *      and dh are the log-scale relative correction factors for the
+     *      width and height. The last
+     *      dimension is the channel dimension.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
+ * each image in the batch, with format [image_height, image_width].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, this
+ * tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
+ * scale of 0.125.
+ * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of boxes before going into the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+ * a non-positive value for unlimited number.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of boxes returning from the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+ * a non-positive value for unlimited number.
+ * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold for hard NMS.
+ * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with
+ * height or width lower than the absolute threshold are filtered out.
+ * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and input1. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, of shape
+ * [num_output_rois], specifying the score of each output box.
+ * The boxes are grouped by batches, but the sequential order in
+ * each batch is not guaranteed. For type of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scale and zero
+ * point must be the same as input0.
+ * * 1: A tensor of the same {@link OperandType} as input3, of shape
+ * [num_output_rois, 4], specifying the coordinates of each output
+ * bounding box for each class, with format [x1, y1, x2, y2].
+ * The sequential order of the boxes corresponds with output0.
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ */
+ GENERATE_PROPOSALS = @1.2::OperationType:GENERATE_PROPOSALS,
+
+ /**
+ * For input tensors x and y, computes x > y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+    GREATER = @1.2::OperationType:GREATER,
+
+    /**
+ * For input tensors x and y, computes x >= y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ GREATER_EQUAL = @1.2::OperationType:GREATER_EQUAL,
+
+ /**
+ * Performs a grouped 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
+ * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
+ * applies a group of different filters to each input channel group, then
+ * concatenates the results together.
+ *
+ * Specifically, the input channels are divided into num_groups groups, each with
+ * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
+ * filters are also divided into num_groups groups, i.e. depth_out is divisible
+ * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
+     * input channel group, and the results are concatenated together.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, g * channel_multiplier + q] =
+ * sum_{di, dj, dk} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj,
+ * g * depth_group + dk] *
+ * filter[g * channel_multiplier + q, di, dj, dk]
+     *         ) + bias[g * channel_multiplier + q]
+ *
+ * where channel_multiplier = depth_out / num_groups
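+     *
+     * For example (hypothetical sizes): with depth_in = 6, num_groups = 3
+     * and depth_out = 12, each group convolves depth_group = 2 input
+     * channels and produces channel_multiplier = 4 output channels.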
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (channelDim at
+ * {@link SymmPerChannelQuantParams}) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, specifying the number of
+     *      groups.
+ * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ GROUPED_CONV_2D = @1.2::OperationType:GROUPED_CONV_2D,
+
+ /**
+ * Localize the maximum keypoints from heatmaps.
+ *
+ * This operation approximates the accurate maximum keypoint scores and
+     * indices after bicubic upscaling by using a Taylor expansion up to the
+ * quadratic term.
+ *
+ * The bounding box is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor of shape
+ * [num_boxes, heatmap_size, heatmap_size, num_keypoints],
+     *      specifying the heatmaps. The height and width of the heatmaps
+     *      must be the same and greater than or equal to 2.
+ * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
+ * each with format [x1, y1, x2, y2]. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
+ * be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
+ * of 0 and scale of 0.125.
+ * * 2: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, with shape
+ * [num_boxes, num_keypoints], specifying score of the keypoints.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 1: A tensor of the same {@link OperandType} as input1, with shape
+ * [num_boxes, num_keypoints, 2], specifying the location of
+ * the keypoints, the second dimension is organized as
+ * [keypoint_x, keypoint_y].
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ */
+ HEATMAP_MAX_KEYPOINT = @1.2::OperationType:HEATMAP_MAX_KEYPOINT,
+
+ /**
+ * Applies instance normalization to the input tensor.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, h, w, c] =
+ * (input[b, h, w, c] - mean[b, c]) * gamma /
+ * sqrt(var[b, c] + epsilon) + beta
+ *
+     * Where the mean and variance are computed across the spatial dimensions,
+     * and sum(1) denotes the number of spatial elements, i.e. height * width:
+ *
+ * mean[b, c] =
+ * sum_{h, w}(input[b, h, w, c]) / sum(1)
+ *
+ * var[b, c] =
+ * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
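+     *
+     * For illustration (hypothetical values): for one batch and channel
+     * with 2x2 spatial values {1, 3, 5, 7}, mean = 4 and var = 5, so with
+     * gamma = 1, beta = 0 and epsilon = 0 each output value is
+     * (input - 4) / sqrt(5).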
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: A scalar, specifying gamma, the scale applied to the normalized
+ * tensor. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 2: A scalar, specifying beta, the offset applied to the normalized
+ * tensor. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 3: A scalar, specifying epsilon, the small value added to variance to
+ * avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 4: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ */
+ INSTANCE_NORMALIZATION = @1.2::OperationType:INSTANCE_NORMALIZATION,
+
+ /**
+ * For input tensors x and y, computes x < y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LESS = @1.2::OperationType:LESS,
+
+ /**
+ * For input tensors x and y, computes x <= y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LESS_EQUAL = @1.2::OperationType:LESS_EQUAL,
+
+ /**
+ * Computes natural logarithm of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOG = @1.2::OperationType:LOG,
+
+ /**
+ * Returns the truth value of x AND y element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LOGICAL_AND = @1.2::OperationType:LOGICAL_AND,
+
+ /**
+ * Computes the truth value of NOT x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOGICAL_NOT = @1.2::OperationType:LOGICAL_NOT,
+
+ /**
+ * Returns the truth value of x OR y element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LOGICAL_OR = @1.2::OperationType:LOGICAL_OR,
+
+ /**
+ * Computes the log softmax activations given logits.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
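+     *
+     * For example (hypothetical values): with logits = [1, 2, 3], beta = 1
+     * and axis = -1, reduce_sum(exp(logits)) is roughly 30.19, so the
+     * output is approximately [-2.41, -1.41, -0.41].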
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor specifying the input logits.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 2: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: The output tensor of the same {@link OperandType} and shape as
+ * input0.
+ */
+ LOG_SOFTMAX = @1.2::OperationType:LOG_SOFTMAX,
+
+ /**
+ * Returns the element-wise maximum of two tensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+ * with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ MAXIMUM = @1.2::OperationType:MAXIMUM,
+
+ /**
+ * Returns the element-wise minimum of two tensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+ * with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ MINIMUM = @1.2::OperationType:MINIMUM,
+
+ /**
+ * Computes numerical negative value element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ NEG = @1.2::OperationType:NEG,
+
+ /**
+ * For input tensors x and y, computes x != y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ NOT_EQUAL = @1.2::OperationType:NOT_EQUAL,
+
+ /**
+ * Pads a tensor with the given constant value according to the specified
+ * paddings.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after
+ * the end of dimension i.
+     * * 2: A scalar specifying the value to use for padding input0.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * pad value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * pad value must be of {@link OperandType::FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the pad value must be of {@link OperandType::INT32}. The
+ * scale and zeroPoint are assumed to be the same as in input0.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
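+     *
+     * For example (hypothetical values): padding an input of shape [2, 3]
+     * with paddings [[1, 1], [0, 2]] produces an output of shape
+     * [1 + 2 + 1, 0 + 3 + 2] = [4, 5].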
+ */
+ PAD_V2 = @1.2::OperationType:PAD_V2,
+
+ /**
+ * Computes the power of one value to another.
+ *
+ * Given a tensor base and a tensor exponent, this operation computes
+ * base^exponent elementwise.
+ *
+     * This operation supports broadcasting. The size of the output is the
+ * maximum size along each dimension of the input operands. It starts with
+ * the trailing dimensions, and works its way forward.
+ *
+ * For example:
+ * base.dimension = {4, 1, 2}
+ * exponent.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor specifying the base.
+ * * 1: A tensor specifying the exponent.
+ *
+ * Outputs:
+ * * 0: An output tensor.
+ */
+ POW = @1.2::OperationType:POW,
+
+ /**
+ * Parametric Rectified Linear Unit.
+ *
+ * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
+ * is a learned array with the same {@link OperandType} and compatible
+ * dimensions as input x.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input.dimension = {4, 1, 2}
+ * alpha.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0, specifying the alpha.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ */
+ PRELU = @1.2::OperationType:PRELU,
+
+ /**
+ * Quantizes the input tensor.
+ *
+ * The formula is:
+ *
+     * output = max(0, min(255, round(input / scale) + zeroPoint))
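+     *
+     * For example (hypothetical parameters): with scale = 0.5 and
+     * zeroPoint = 127, an input value of 1.0 maps to
+     * round(1.0 / 0.5) + 127 = 129, while -100.0 clamps to 0.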
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0, but with
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ */
+ QUANTIZE = @1.2::OperationType:QUANTIZE,
+
+ /**
+ * A version of quantized LSTM, using 16 bit quantization for internal
+ * state.
+ *
+ * There is no projection layer, so cell state size is equal to the output
+ * size.
+ *
+ * Inputs:
+ * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, inputSize] specifying the input to the LSTM
+ * cell. Tensor is quantized with a fixed quantization range of
+ * [-1, 127/128] (scale = 1/128, zeroPoint = 128).
+ * * 1: The input-to-input weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-input part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 2: The input-to-forget weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-forget part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 3: The input-to-cell weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-cell part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 4: The input-to-output weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-output part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 5: The recurrent-to-input weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-input part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 6: The recurrent-to-forget weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-forget
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 7: The recurrent-to-cell weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-cell part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 8: The recurrent-to-output weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-output
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 9: The input gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+     * * 10: The forget gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+     * * 11: The cell bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+     * * 12: The output gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] specifying the cell state from the
+ * previous time step of the LSTM cell. It is quantized using a
+ * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
+ * 32768, zeroPoint = 0).
+ * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *       and shape [numBatches, outputSize] specifying the output of the LSTM
+ * cell from previous time-step. Tensor is quantized with a fixed
+ * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
+ * 128).
+     *
+ * Outputs:
+ * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] which contains a cell state from
+ * the current time step. Tensor is quantized using a quantization
+ * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
+ * 0).
+ * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [numBatches, outputSize] which contains the output value.
+ * Tensor is quantized with a fixed quantization range of [-1, 127/128]
+ * (scale = 1/128, zeroPoint = 128).
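+     *
+     * As an illustration of the fixed ranges: a quantized value q
+     * dequantizes to scale * (q - zeroPoint), so for the output tensor
+     * (scale = 1/128, zeroPoint = 128) q = 0 maps to -1.0 and q = 255
+     * maps to 127/128.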
+ */
+ QUANTIZED_16BIT_LSTM = @1.2::OperationType:QUANTIZED_16BIT_LSTM,
+
+ /**
+ * Draws samples from a multinomial distribution.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 2-D tensor with shape [batches, classes], specifying the
+ * unnormalized log-probabilities for all classes.
+ * * 1: A scalar {@link OperandType::INT32}, specifying the number of
+ * independent samples to draw for each row slice.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
+ * specifying seeds used to initialize the random distribution.
+     *
+     * Outputs:
+ * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
+ * [batches, samples], containing the drawn samples.
+ */
+ RANDOM_MULTINOMIAL = @1.2::OperationType:RANDOM_MULTINOMIAL,
+
+ /**
+ * Reduces a tensor by computing the "logical and" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
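+     *
+     * For example (hypothetical shapes): reducing an input of shape [2, 3]
+     * along dimension 1 produces shape [2] if keep_dims is false, or
+     * [2, 1] if keep_dims is true.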
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_ALL = @1.2::OperationType:REDUCE_ALL,
+
+ /**
+ * Reduces a tensor by computing the "logical or" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_ANY = @1.2::OperationType:REDUCE_ANY,
+
+ /**
+ * Reduces a tensor by computing the maximum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REDUCE_MAX = @1.2::OperationType:REDUCE_MAX,
+
+ /**
+ * Reduces a tensor by computing the minimum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REDUCE_MIN = @1.2::OperationType:REDUCE_MIN,
+
+ /**
+ * Reduces a tensor by multiplying elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_PROD = @1.2::OperationType:REDUCE_PROD,
+
+ /**
+ * Reduces a tensor by summing elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_SUM = @1.2::OperationType:REDUCE_SUM,
+
+ /**
+ * Select and scale the feature map of each region of interest to a unified
+ * output size by average pooling sampling points from bilinear interpolation.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+     * A spatial scaling factor is applied to map into feature map coordinates.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+     * No rounding is applied in this operation. The sampling points are
+     * uniformly distributed in the pooling bin and their values are
+     * calculated by bilinear interpolation.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+ * supported for this tensor.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the number of
+ * sampling points in height dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_height/out_height).
+ * * 8: An {@link OperandType::INT32} scalar, specifying the number of
+ * sampling points in width dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_width/out_width).
+ * * 9: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ */
+ ROI_ALIGN = @1.2::OperationType:ROI_ALIGN,
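
As an illustration of the sampling described above, a minimal Python sketch of one output bin: sampling points are placed uniformly in the bin, read with bilinear interpolation, and averaged. The function names and the in-bounds assumption are ours; the HAL operation additionally handles batching, channels, quantization, and the adaptive sampling-count rule.

```python
def bilinear(fm, y, x):
    # Bilinearly sample a 2-D feature map at fractional (y, x); assumes
    # 0 <= y <= h-1 and 0 <= x <= w-1.
    h, w = len(fm), len(fm[0])
    y0, x0 = int(y), int(x)
    y1, x1 = min(y0 + 1, h - 1), min(x0 + 1, w - 1)
    dy, dx = y - y0, x - x0
    return (fm[y0][x0] * (1 - dy) * (1 - dx) + fm[y0][x1] * (1 - dy) * dx
            + fm[y1][x0] * dy * (1 - dx) + fm[y1][x1] * dy * dx)

def roi_align_bin(fm, y_start, x_start, bin_h, bin_w, ny, nx):
    # Average ny * nx sampling points placed at sub-grid centers of the bin.
    total = 0.0
    for i in range(ny):
        for j in range(nx):
            y = y_start + (i + 0.5) * bin_h / ny
            x = x_start + (j + 0.5) * bin_w / nx
            total += bilinear(fm, y, x)
    return total / (ny * nx)
```
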
+
+ /**
+ * Selects and scales the feature map of each region of interest to a unified
+ * output size by max-pooling.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A spatial scaling factor is applied to map them into feature map coordinates.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Rounding is applied in this operation to ensure integer boundaries for
+ * regions of interest and pooling bins.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ ROI_POOLING = @1.2::OperationType:ROI_POOLING,
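
In contrast to ROI_ALIGN, ROI_POOLING snaps everything to the integer grid. A simplified Python sketch of the idea, for a single channel of a single ROI; the names and the exact bin-boundary arithmetic are ours, and input coordinates are assumed to stay inside the feature map.

```python
import math

def roi_max_pool(fm, roi, out_h, out_w, scale):
    # Scale an [x1, y1, x2, y2] ROI into feature-map coordinates, round to
    # the integer grid, then take the max over each out_h x out_w bin.
    x1, y1, x2, y2 = (int(round(v * scale)) for v in roi)
    roi_h, roi_w = max(y2 - y1, 1), max(x2 - x1, 1)
    out = [[0.0] * out_w for _ in range(out_h)]
    for by in range(out_h):
        y_lo = y1 + (by * roi_h) // out_h
        y_hi = y1 + max(math.ceil((by + 1) * roi_h / out_h), (by * roi_h) // out_h + 1)
        for bx in range(out_w):
            x_lo = x1 + (bx * roi_w) // out_w
            x_hi = x1 + max(math.ceil((bx + 1) * roi_w / out_w), (bx * roi_w) // out_w + 1)
            out[by][bx] = max(fm[y][x] for y in range(y_lo, y_hi)
                                       for x in range(x_lo, x_hi))
    return out
```
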
+
+ /**
+ * Computes reciprocal of square root of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ RSQRT = @1.2::OperationType:RSQRT,
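
The element-wise unary operations in this section (RSQRT, SIN, SQRT) all follow the same shape-preserving pattern; a minimal Python sketch for RSQRT (names ours) stands in for all of them.

```python
import math

def rsqrt(t):
    # Element-wise 1/sqrt(x) over a flat tensor; x must be positive.
    return [1.0 / math.sqrt(v) for v in t]

print(rsqrt([1.0, 4.0, 16.0]))  # [1.0, 0.5, 0.25]
```
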
+
+ /**
+ * Using a tensor of booleans C and input tensors x and y, select values
+ * element-wise from both input tensors:
+ *
+ * O[i] = C[i] ? x[i] : y[i].
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a
+ * mask that chooses, based on the value at each element, whether the
+ * corresponding element in the output should be taken from input1 (if
+ * true) or input2 (if false).
+ * * 1: An input tensor of the same shape as input0.
+ * * 2: An input tensor of the same shape and type as input1.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input1 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same type and shape as input1 and input2.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ SELECT = @1.2::OperationType:SELECT,
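
The select rule above reduces to a one-line mapping; a minimal Python sketch over flat tensors of equal shape (names ours):

```python
def select(c, x, y):
    # O[i] = C[i] ? x[i] : y[i]
    return [xi if ci else yi for ci, xi, yi in zip(c, x, y)]

print(select([True, False, True], [1, 2, 3], [10, 20, 30]))  # [1, 20, 3]
```
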
+
+ /**
+ * Computes sin of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ SIN = @1.2::OperationType:SIN,
+
+ /**
+ * Extracts a slice of specified size from the input tensor starting at a
+ * specified location.
+ *
+ * The starting location is specified as a 1-D tensor containing offsets
+ * for each dimension. The size is specified as a 1-D tensor containing
+ * either size of a slice along corresponding dimension or -1. In the latter
+ * case, all the remaining elements in dimension are included in the slice.
+ *
+ * The sum of the begin offset and the size of a slice must not exceed the
+ * size of the corresponding dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to take slice from, may be zero-sized.
+ * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+ * the beginning indices of the slice in each dimension.
+ * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+ * the size of the slice in each dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input containing the slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * its scale and zeroPoint must be the same as the input0 scale and zeroPoint.
+ */
+ SLICE = @1.2::OperationType:SLICE,
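
The per-dimension behavior, including the -1 "take the rest" size, is easiest to see in one dimension; a minimal Python sketch (names ours):

```python
def slice_1d(x, begin, size):
    # One dimension of SLICE: size == -1 keeps all remaining elements.
    end = len(x) if size == -1 else begin + size
    assert end <= len(x), "begin + size must not exceed the dimension size"
    return x[begin:end]

print(slice_1d([0, 1, 2, 3, 4], 1, 3))   # [1, 2, 3]
print(slice_1d([0, 1, 2, 3, 4], 2, -1))  # [2, 3, 4]
```
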
+
+ /**
+ * Splits a tensor along a given axis into num_splits subtensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to split.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis along
+ * which to split.
+ * * 2: An {@link OperandType::INT32} scalar indicating the number of
+ * splits along given axis. Must evenly divide axis size.
+ *
+ * Outputs:
+ * * 0 ~ (num_splits - 1): Resulting subtensors.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SPLIT = @1.2::OperationType:SPLIT,
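
A minimal Python sketch of the split semantics for axis 0 of a 1-D tensor, including the even-divisibility requirement (names ours):

```python
def split(x, num_splits):
    # Split a 1-D tensor along axis 0 into num_splits equal subtensors.
    assert len(x) % num_splits == 0, "num_splits must evenly divide the axis"
    step = len(x) // num_splits
    return [x[i * step:(i + 1) * step] for i in range(num_splits)]

print(split([1, 2, 3, 4, 5, 6], 3))  # [[1, 2], [3, 4], [5, 6]]
```
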
+
+ /**
+ * Computes square root of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ SQRT = @1.2::OperationType:SQRT,
+
+ /**
+ * Constructs a tensor by tiling a given tensor.
+ *
+ * This operation creates a new tensor by replicating `input` `multiples`
+ * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
+ * elements, and the values of `input` are replicated `multiples[i]` times
+ * along the i-th dimension.
+ * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}.
+ * The length of multiples must be n.
+ *
+ * Outputs:
+ * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ TILE = @1.2::OperationType:TILE,
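
The 1-D case of tiling reduces to plain repetition, matching the `[a b c d]` by `[2]` example above; a trivial Python sketch (name ours):

```python
def tile_1d(x, multiple):
    # 1-D case: the output dimension has len(x) * multiple elements.
    return x * multiple

print(tile_1d(['a', 'b', 'c', 'd'], 2))  # ['a','b','c','d','a','b','c','d']
```
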
+
+ /**
+ * Finds values and indices of the k largest entries for the last dimension.
+ *
+ * Resulting values in each dimension are sorted in descending order. If
+ * two values are equal, the one with the larger index appears first.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of
+ * top elements to look for along the last dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input, containing the k
+ * largest elements along each last dimensional slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
+ * containing the indices of values within the last dimension of input.
+ */
+ TOPK_V2 = @1.2::OperationType:TOPK_V2,
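
A minimal Python sketch of the 1-D case, including the tie-breaking rule that the larger index comes first (names ours):

```python
def topk(x, k):
    # Sort descending by (value, index) so equal values break toward the
    # larger index, then keep the first k.
    order = sorted(range(len(x)), key=lambda i: (x[i], i), reverse=True)[:k]
    return [x[i] for i in order], order

print(topk([1, 3, 3, 2], 3))  # ([3, 3, 2], [2, 1, 3])
```
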
+
+ /**
+ * Performs the transpose of the 2-D convolution operation.
+ *
+ * This operation is sometimes called "deconvolution" after Deconvolutional
+ * Networks, but is actually the transpose (gradient) of
+ * {@link OperationType::CONV_2D} rather than an actual deconvolution.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, which has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
+ * tensor shape.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, which has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ TRANSPOSE_CONV_2D = @1.2::OperationType:TRANSPOSE_CONV_2D,
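
For the explicit-padding form, the output spatial extents follow the usual transposed-convolution formula; a one-line Python sketch (names ours). In the implicit-padding form, the output shape is instead passed directly as input 3.

```python
def tconv_out_size(in_size, filter_size, stride, pad_before, pad_after):
    # Standard transposed-convolution output extent for one spatial dimension.
    return stride * (in_size - 1) + filter_size - pad_before - pad_after

print(tconv_out_size(4, 3, 2, 0, 0))  # 4x4 input, 3x3 filter, stride 2 -> 9
```
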
+
+ /**
+ * A recurrent neural network specified by an LSTM cell.
+ *
+ * Performs (fully) dynamic unrolling of input.
+ *
+ * This Op unrolls the input along the time dimension, and implements the
+ * following operation for each element in the sequence
+ * s = 1...sequence_length:
+ * outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
+ *
+ * Where LSTMOp is the LSTM op as in {@link OperandType::LSTM},
+ * the "projection" is an optional projection layer from state and output
+ * and the “activation” is the function passed as the
+ * “fused_activation_function” argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where “max_time” is the number of timesteps (sequence length),
+ * “batch_size” corresponds to the batching dimension, and
+ * “input_size” is the size of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * * 23:Time-major if true, batch-major if false.
+ * * 24:The input layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 25:The forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 26:The cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 27:The output layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The output (\f$o_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, output_size]
+ * If batch-major: [batch_size, max_time, output_size]
+ */
+ UNIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_LSTM,
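
A structural Python sketch of the dynamic unrolling only: the full LSTMOp (gates, peepholes, projection, clipping) is abstracted into a `cell` callback, and all names are ours.

```python
def unroll_lstm(inputs, cell, h0, c0):
    # outputs[s] = projection(state = activation(LSTMOp(inputs[s])));
    # `cell` stands in for the whole per-step LSTM computation.
    h, c, outputs = h0, c0, []
    for x_t in inputs:          # time-major: one iteration per timestep
        h, c = cell(x_t, h, c)  # h is both the new output state and outputs[s]
        outputs.append(h)
    return outputs

# Toy cell that just echoes its input:
print(unroll_lstm([[1], [2], [3]], lambda x, h, c: (x, c), [0], [0]))
```
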
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs.
+ *
+ * This layer unrolls the input along the sequence dimension, and implements
+ * the following operation
+ * for each element in the sequence s = 1...sequence_length:
+ * outputs[s] = state = activation(inputs[s] * input_weights’ + state *
+ * recurrent_weights’ + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by input 6 (timeMajor). If
+ * it is set to 1, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: weights.
+ * A 2-D tensor of shape [numUnits, inputSize].
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [numUnits, numUnits].
+ * * 3: bias.
+ * A 1-D tensor of shape [numUnits].
+ * * 4: hidden state
+ * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: fusedActivationFunction.
+ * A {@link FusedActivationFunc} value indicating the activation function. If
+ * “NONE” is specified then it results in a linear activation.
+ * * 6: timeMajor
+ * An {@link OperandType::INT32} scalar specifying the shape format
+ * of input and output tensors. Must be set to either 0 or 1.
+ *
+ * Outputs:
+ * * 0: output.
+ * A 3-D tensor. The shape is defined by input 6 (timeMajor). If
+ * it is set to 1, then the output has a shape [maxTime, batchSize,
+ * numUnits], otherwise the output has a shape [batchSize, maxTime,
+ * numUnits].
+ */
+ UNIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_RNN,
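
The recurrence above, written out as a minimal Python sketch for batch size 1 with a fixed tanh activation (names and the demo weights are ours):

```python
import math

def basic_rnn(inputs, W, R, b, h0):
    # outputs[s] = state = tanh(x_s . W' + state . R' + b)
    matvec = lambda M, v: [sum(m * u for m, u in zip(row, v)) for row in M]
    h, outputs = h0, []
    for x in inputs:  # one iteration per timestep
        h = [math.tanh(a + r + c)
             for a, r, c in zip(matvec(W, x), matvec(R, h), b)]
        outputs.append(h)
    return outputs

print(basic_rnn([[1.0, 0.0], [0.0, 1.0]],   # maxTime=2, inputSize=2
                [[1.0, 0.0], [0.0, 1.0]],   # weights [numUnits, inputSize]
                [[0.5, 0.0], [0.0, 0.5]],   # recurrent_weights
                [0.0, 0.0], [0.0, 0.0]))    # bias, initial hidden state
```
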
+
+ /**
+ * Resizes images to the given size using nearest neighbor interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio is not the
+ * same as the input aspect ratio. The corner pixels of the output may not be
+ * the same as the corner pixels of the input.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Inputs (resizing by scale):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,
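
A minimal Python sketch of the shape-based form for a single 2-D channel, using one common index-mapping convention (output pixel (y, x) copies the input pixel at floor(y * in_h / out_h), floor(x * in_w / out_w)); drivers may round differently, and the names are ours.

```python
def resize_nearest(img, out_h, out_w):
    # Nearest-neighbor resize of a 2-D image.
    in_h, in_w = len(img), len(img[0])
    return [[img[y * in_h // out_h][x * in_w // out_w]
             for x in range(out_w)] for y in range(out_h)]

print(resize_nearest([[1, 2], [3, 4]], 4, 4))
```
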
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+ FUNDAMENTAL_MAX = 94,
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
+
/**
* The capabilities of a driver.
@@ -109,6 +4603,32 @@
};
/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
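
The doc comment on `type` above says values beyond OperationTypeRange::BASE_MAX are extension types resolved through Model::extensionNameToPrefix. A sketch of how a client could split such a value, assuming the NNAPI convention of a 16-bit type-within-extension below the prefix bits; the constant and helper names here are ours.

```python
LOW_BITS_TYPE = 16   # assumed bit split between prefix and in-extension type
BASE_MAX = 0xFFFF    # OperationTypeRange::BASE_MAX

def decode_operation_type(value, prefix_to_name):
    # Values above BASE_MAX are extension types: the high bits select the
    # extension (via Model::extensionNameToPrefix), the low bits the type.
    if value <= BASE_MAX:
        return None, value  # a regular OperationType
    return prefix_to_name.get(value >> LOW_BITS_TYPE), value & BASE_MAX
```
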
+
+/**
* Describes one operand of the model's graph.
*/
struct Operand {
@@ -233,28 +4753,6 @@
};
/**
- * Describes one operation of the model's graph.
- */
-struct Operation {
- /**
- * The operation type.
- */
- OperationType type;
-
- /**
- * Describes the table that contains the indexes of the inputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> inputs;
-
- /**
- * Describes the table that contains the indexes of the outputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> outputs;
-};
-
-/**
* A Neural Network Model.
*
* This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index d41cfd2..e06f5d6 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -44,6 +44,47 @@
BASE_MAX = 0xFFFF,
};
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+%insert Operation_1.0
+
+%insert Operation_1.1
+
+%insert Operation_1.2
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operation_1.3_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
+
/**
* The capabilities of a driver.
@@ -80,6 +121,32 @@
};
/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
* Describes one operand of the model's graph.
*/
struct Operand {
@@ -204,28 +271,6 @@
};
/**
- * Describes one operation of the model's graph.
- */
-struct Operation {
- /**
- * The operation type.
- */
- OperationType type;
-
- /**
- * Describes the table that contains the indexes of the inputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> inputs;
-
- /**
- * Describes the table that contains the indexes of the outputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> outputs;
-};
-
-/**
* A Neural Network Model.
*
* This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 325d641..a1e04c5 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -63,7 +63,6 @@
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::MeasureTiming;
-using V1_2::OperationType;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
index 7361078..a7569e6 100644
--- a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
+++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
@@ -25,8 +25,6 @@
#define CHECK_TEST_ENUM(EnumType, enumValue) \
static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
-using V1_2::OperationType;
-
CHECK_TEST_ENUM(OperandType, FLOAT32);
CHECK_TEST_ENUM(OperandType, INT32);
CHECK_TEST_ENUM(OperandType, UINT32);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 1ff02dc..46bbd3f 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -27,7 +27,6 @@
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
-using V1_2::OperationType;
using V1_2::OperationTypeRange;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
diff --git a/radio/config/1.0/vts/functional/radio_config_hidl_hal_utils.h b/radio/config/1.0/vts/functional/radio_config_hidl_hal_utils.h
index 6bc1b65..2722afe 100644
--- a/radio/config/1.0/vts/functional/radio_config_hidl_hal_utils.h
+++ b/radio/config/1.0/vts/functional/radio_config_hidl_hal_utils.h
@@ -27,6 +27,7 @@
#include <gtest/gtest.h>
#include <hidl/GtestPrinter.h>
#include <hidl/ServiceManagement.h>
+#include <log/log.h>
#include "vts_test_util.h"
diff --git a/radio/config/1.1/vts/functional/radio_config_hidl_hal_utils.h b/radio/config/1.1/vts/functional/radio_config_hidl_hal_utils.h
index e9951dc..4cdeb06 100644
--- a/radio/config/1.1/vts/functional/radio_config_hidl_hal_utils.h
+++ b/radio/config/1.1/vts/functional/radio_config_hidl_hal_utils.h
@@ -26,6 +26,7 @@
#include <gtest/gtest.h>
#include <hidl/GtestPrinter.h>
#include <hidl/ServiceManagement.h>
+#include <log/log.h>
#include "vts_test_util.h"
diff --git a/radio/config/1.2/vts/functional/radio_config_hidl_hal_utils.h b/radio/config/1.2/vts/functional/radio_config_hidl_hal_utils.h
index e9cbcbd..ba3f02e 100644
--- a/radio/config/1.2/vts/functional/radio_config_hidl_hal_utils.h
+++ b/radio/config/1.2/vts/functional/radio_config_hidl_hal_utils.h
@@ -28,6 +28,7 @@
#include <gtest/gtest.h>
#include <hidl/GtestPrinter.h>
#include <hidl/ServiceManagement.h>
+#include <log/log.h>
#include "vts_test_util.h"
@@ -98,20 +99,6 @@
const ::android::hardware::hidl_vec<SimSlotStatus>& slotStatus);
};
-// Test environment for Radio HIDL HAL.
-class RadioConfigHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static RadioConfigHidlEnvironment* Instance() {
- static RadioConfigHidlEnvironment* instance = new RadioConfigHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override { registerTestService<IRadioConfig>(); }
-
- private:
- RadioConfigHidlEnvironment() {}
-};
-
// The main test class for Radio config HIDL.
class RadioConfigHidlTest : public ::testing::TestWithParam<std::string> {
protected:
diff --git a/tests/memory/2.0/Android.bp b/tests/memory/2.0/Android.bp
new file mode 100644
index 0000000..5166652
--- /dev/null
+++ b/tests/memory/2.0/Android.bp
@@ -0,0 +1,12 @@
+hidl_interface {
+ name: "android.hardware.tests.memory@2.0",
+ root: "android.hardware",
+ srcs: [
+ "IMemoryInterface.hal",
+ "types.hal",
+ ],
+ interfaces: [
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/tests/memory/2.0/IMemoryInterface.hal b/tests/memory/2.0/IMemoryInterface.hal
new file mode 100644
index 0000000..2c824bf
--- /dev/null
+++ b/tests/memory/2.0/IMemoryInterface.hal
@@ -0,0 +1,12 @@
+package android.hardware.tests.memory@2.0;
+
+interface IMemoryInterface {
+ // Flips all the bits in the given memory buffer.
+ bitwiseNot(memory mem);
+ // Returns a read-only buffer of size 8, containing the bytes 0..7.
+ getTestMem() generates(memory mem);
+ // Given two memory regions of the same size, returns two memory fields of
+ // equal size, the first contains the byte-wise sum and the other the byte-
+ // wise difference.
+ getSumDiff(TwoMemory in) generates(TwoMemory out);
+};
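
For reference, the byte-wise semantics the interface comments above describe, sketched in Python; treating overflow as wrap-around modulo 256 is our assumption, not something the interface spells out.

```python
def bitwise_not(mem):
    # Flip all bits in the buffer.
    return bytes(~b & 0xFF for b in mem)

def sum_diff(mem1, mem2):
    # Byte-wise sum and difference (modulo 256) of two equal-size buffers.
    assert len(mem1) == len(mem2)
    sums = bytes((a + b) & 0xFF for a, b in zip(mem1, mem2))
    diffs = bytes((a - b) & 0xFF for a, b in zip(mem1, mem2))
    return sums, diffs

print(sum_diff(bytes(range(8)), bytes(range(8))))
```
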
diff --git a/tests/memory/2.0/types.hal b/tests/memory/2.0/types.hal
new file mode 100644
index 0000000..9ec357b
--- /dev/null
+++ b/tests/memory/2.0/types.hal
@@ -0,0 +1,6 @@
+package android.hardware.tests.memory@2.0;
+
+struct TwoMemory {
+ memory mem1;
+ memory mem2;
+};
diff --git a/vibrator/1.4/Android.bp b/vibrator/1.4/Android.bp
deleted file mode 100644
index acfc795..0000000
--- a/vibrator/1.4/Android.bp
+++ /dev/null
@@ -1,19 +0,0 @@
-// This file is autogenerated by hidl-gen -Landroidbp.
-
-hidl_interface {
- name: "android.hardware.vibrator@1.4",
- root: "android.hardware",
- srcs: [
- "types.hal",
- "IVibrator.hal",
- "IVibratorCallback.hal",
- ],
- interfaces: [
- "android.hardware.vibrator@1.0",
- "android.hardware.vibrator@1.1",
- "android.hardware.vibrator@1.2",
- "android.hardware.vibrator@1.3",
- "android.hidl.base@1.0",
- ],
- gen_java: true,
-}
diff --git a/vibrator/1.4/IVibrator.hal b/vibrator/1.4/IVibrator.hal
deleted file mode 100644
index 913abe3..0000000
--- a/vibrator/1.4/IVibrator.hal
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.hardware.vibrator@1.4;
-
-import @1.0::EffectStrength;
-import @1.3::Effect;
-import @1.0::Status;
-import @1.3::IVibrator;
-import IVibratorCallback;
-
-interface IVibrator extends @1.3::IVibrator {
- /**
- * Determine capabilities of the vibrator HAL.
- */
- getCapabilities() generates (bitfield<Capabilities> capabilities);
-
- /**
- * Turn on vibrator
- *
- * This function must only be called after the previous timeout has expired or
- * was canceled (through off()).
- * @param timeoutMs number of milliseconds to vibrate.
- * @param callback A callback used to inform Frameworks of state change, if supported.
- * @return vibratorOnRet whether vibrator command was successful or not.
- */
- on_1_4(uint32_t timeoutMs, IVibratorCallback callback) generates (Status vibratorOnRet);
-
- /**
- * Fire off a predefined haptic event.
- *
- * @param effect The type of haptic event to trigger.
- * @param strength The intensity of haptic event to trigger.
- * @param callback A callback used to inform Frameworks of state change, if supported.
- * @return status Whether the effect was successfully performed or not. Must
- * return Status::UNSUPPORTED_OPERATION if the effect is not supported.
- * @return lengthMs The length of time the event is expected to take in
- * milliseconds. This doesn't need to be perfectly accurate, but should be a reasonable
- * approximation. Should be a positive, non-zero value if the returned status is Status::OK,
- * and set to 0 otherwise.
- */
- perform_1_4(Effect effect, EffectStrength strength, IVibratorCallback callback)
- generates (Status status, uint32_t lengthMs);
-};
diff --git a/vibrator/1.4/types.hal b/vibrator/1.4/types.hal
deleted file mode 100644
index acc49b1..0000000
--- a/vibrator/1.4/types.hal
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.hardware.vibrator@1.4;
-
-enum Capabilities : uint32_t {
- ON_COMPLETION_CALLBACK = 1 << 0,
- PERFORM_COMPLETION_CALLBACK = 1 << 1,
-};
diff --git a/vibrator/1.4/vts/functional/VtsHalVibratorV1_4TargetTest.cpp b/vibrator/1.4/vts/functional/VtsHalVibratorV1_4TargetTest.cpp
deleted file mode 100644
index 1b6abe9..0000000
--- a/vibrator/1.4/vts/functional/VtsHalVibratorV1_4TargetTest.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "vibrator_hidl_hal_test"
-
-#include <android-base/logging.h>
-#include <android/hardware/vibrator/1.0/types.h>
-#include <android/hardware/vibrator/1.4/IVibrator.h>
-#include <gtest/gtest.h>
-#include <hidl/GtestPrinter.h>
-#include <hidl/ServiceManagement.h>
-
-#include <getopt.h>
-#include <unistd.h>
-
-#include <future>
-
-using ::android::sp;
-using ::android::hardware::hidl_bitfield;
-using ::android::hardware::hidl_enum_range;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::vibrator::V1_0::EffectStrength;
-using ::android::hardware::vibrator::V1_0::Status;
-using ::android::hardware::vibrator::V1_3::Effect;
-using ::android::hardware::vibrator::V1_4::Capabilities;
-using ::android::hardware::vibrator::V1_4::IVibrator;
-using ::android::hardware::vibrator::V1_4::IVibratorCallback;
-
-static uint32_t sCompletionLimitMs = UINT32_MAX;
-
-#define EXPECT_OK(ret) ASSERT_TRUE((ret).isOk())
-
-class CompletionCallback : public IVibratorCallback {
- public:
- CompletionCallback(std::function<void()> callback) : mCallback(callback) {}
- Return<void> onComplete() override {
- mCallback();
- return Void();
- }
-
- private:
- std::function<void()> mCallback;
-};
-
-class VibratorHidlTest_1_4 : public testing::TestWithParam<std::string> {
- public:
- virtual void SetUp() override {
- vibrator = IVibrator::getService(GetParam());
- ASSERT_NE(vibrator, nullptr);
- capabilities = vibrator->getCapabilities();
- }
-
- virtual void TearDown() override {}
-
- sp<IVibrator> vibrator;
- hidl_bitfield<Capabilities> capabilities;
-};
-
-TEST_P(VibratorHidlTest_1_4, OnWithCallback) {
- if (capabilities & Capabilities::ON_COMPLETION_CALLBACK) {
- std::promise<void> completionPromise;
- std::future<void> completionFuture{completionPromise.get_future()};
- sp<CompletionCallback> callback =
- new CompletionCallback([&completionPromise] { completionPromise.set_value(); });
- uint32_t duration = 250;
- std::chrono::milliseconds timeout{duration * 2};
- EXPECT_EQ(Status::OK, vibrator->on_1_4(duration, callback));
- EXPECT_EQ(completionFuture.wait_for(timeout), std::future_status::ready);
- vibrator->off();
- }
-}
-
-static void validatePerformEffectUnsupportedOperation(Status status, uint32_t lengthMs) {
- ASSERT_EQ(Status::UNSUPPORTED_OPERATION, status);
- ASSERT_EQ(static_cast<uint32_t>(0), lengthMs)
- << "Effects that return UNSUPPORTED_OPERATION must have a duration of zero";
-}
-
-static void validatePerformEffect(Status status, uint32_t lengthMs) {
- ASSERT_TRUE(status == Status::OK || status == Status::UNSUPPORTED_OPERATION);
- if (status == Status::OK) {
- ASSERT_LT(static_cast<uint32_t>(0), lengthMs)
- << "Effects that return OK must return a positive duration";
- } else {
- validatePerformEffectUnsupportedOperation(status, lengthMs);
- }
-}
-
-/*
- * Test to make sure effects within the valid range return are either supported and return OK with
- * a valid duration, or are unsupported and return UNSUPPORTED_OPERATION with a duration of 0.
- */
-TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4) {
- Status performStatus;
- uint32_t performLength;
- auto validateWrapper = [&](Status status, uint32_t lengthMs) {
- performStatus = status;
- performLength = lengthMs;
- validatePerformEffect(status, lengthMs);
- };
- for (const auto& effect : hidl_enum_range<Effect>()) {
- for (const auto& strength : hidl_enum_range<EffectStrength>()) {
- std::promise<void> completionPromise;
- std::future<void> completionFuture{completionPromise.get_future()};
- sp<CompletionCallback> callback =
- new CompletionCallback([&completionPromise] { completionPromise.set_value(); });
- EXPECT_OK(vibrator->perform_1_4(effect, strength, callback, validateWrapper));
- if (performStatus == Status::OK && performLength < sCompletionLimitMs &&
- (capabilities & Capabilities::PERFORM_COMPLETION_CALLBACK)) {
- std::chrono::milliseconds timeout{performLength * 2};
- EXPECT_EQ(completionFuture.wait_for(timeout), std::future_status::ready);
- }
- }
- }
-}
-
-/*
- * Test to make sure effect values above the valid range are rejected.
- */
-TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadEffects_AboveValidRange) {
- Effect effect = *std::prev(hidl_enum_range<Effect>().end());
- Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) + 1);
- EXPECT_OK(vibrator->perform_1_4(badEffect, EffectStrength::LIGHT, nullptr,
- validatePerformEffectUnsupportedOperation));
-}
-
-/*
- * Test to make sure effect values below the valid range are rejected.
- */
-TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadEffects_BelowValidRange) {
- Effect effect = *hidl_enum_range<Effect>().begin();
- Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) - 1);
- EXPECT_OK(vibrator->perform_1_4(badEffect, EffectStrength::LIGHT, nullptr,
- validatePerformEffectUnsupportedOperation));
-}
-
-/*
- * Test to make sure strength values above the valid range are rejected.
- */
-TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadStrength_AboveValidRange) {
- EffectStrength strength = *std::prev(hidl_enum_range<EffectStrength>().end());
- EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) + 1);
- EXPECT_OK(vibrator->perform_1_4(Effect::THUD, badStrength, nullptr,
- validatePerformEffectUnsupportedOperation));
-}
-
-/*
- * Test to make sure strength values below the valid range are rejected.
- */
-TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadStrength_BelowValidRange) {
- EffectStrength strength = *hidl_enum_range<EffectStrength>().begin();
- EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) - 1);
- EXPECT_OK(vibrator->perform_1_4(Effect::THUD, badStrength, nullptr,
- validatePerformEffectUnsupportedOperation));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- PerInstance, VibratorHidlTest_1_4,
- testing::ValuesIn(android::hardware::getAllHalInstanceNames(IVibrator::descriptor)),
- android::hardware::PrintInstanceNameToString);
-
-enum {
- OPTION_COMPLETION_LIMIT_MS,
-};
-
-int main(int argc, char** argv) {
- struct option options[] = {
- {"completion-limit-ms", required_argument, 0, OPTION_COMPLETION_LIMIT_MS}, {}};
-
- printf("Running main() from %s\n", __FILE__);
- testing::InitGoogleTest(&argc, argv);
-
- while (true) {
- int opt = getopt_long(argc, argv, "", options, nullptr);
- if (opt == -1) {
- break;
- }
- switch (opt) {
- case OPTION_COMPLETION_LIMIT_MS:
- std::istringstream(optarg) >> sCompletionLimitMs;
- break;
- default:
- printf("Unrecognized option\n");
- return -EINVAL;
- }
- }
-
- return RUN_ALL_TESTS();
-}