Merge "vibrator: Add Composition APIs"
diff --git a/audio/6.0/IDevice.hal b/audio/6.0/IDevice.hal
index e885fe2..122c550 100644
--- a/audio/6.0/IDevice.hal
+++ b/audio/6.0/IDevice.hal
@@ -280,4 +280,19 @@
      */
     setConnectedState(DeviceAddress address, bool connected)
             generates (Result retval);
+
+    /**
+     * Called by the framework to deinitialize the device and free up
+     * all currently allocated resources. It is recommended to close
+     * the device on the client side as soon as it becomes unused.
+     *
+     * Note that all streams must be closed by the client before
+     * attempting to close the device they belong to.
+     *
+     * @return retval OK in case of success.
+     *                INVALID_STATE if the device was already closed
+     *                or there are streams currently open.
+     */
+    @exit
+    close() generates (Result retval);
 };
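
For reference, the client-side sequence implied by this contract could look as follows. This is a minimal sketch, assuming the device and stream were obtained via IDevicesFactory::openDevice and IDevice::openOutputStream; shutdownDevice is a hypothetical helper, and the transport-error fallback value is an arbitrary choice:

    #include <android/hardware/audio/6.0/IDevice.h>
    #include <android/hardware/audio/6.0/IStreamOut.h>

    using ::android::sp;
    using ::android::hardware::Return;
    using namespace ::android::hardware::audio::V6_0;

    // All streams must be closed before the device; otherwise
    // IDevice::close() returns INVALID_STATE.
    Result shutdownDevice(const sp<IStreamOut>& stream, const sp<IDevice>& device) {
        if (stream != nullptr) {
            stream->close();  // must precede device->close()
        }
        Return<Result> ret = device->close();
        // Treat a transport (binder) failure as NOT_INITIALIZED (arbitrary choice).
        return ret.isOk() ? static_cast<Result>(ret) : Result::NOT_INITIALIZED;
    }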
diff --git a/audio/6.0/IStream.hal b/audio/6.0/IStream.hal
index 451e116..d7d3c84 100644
--- a/audio/6.0/IStream.hal
+++ b/audio/6.0/IStream.hal
@@ -277,7 +277,7 @@
      * @return retval OK in case of success.
      *                NOT_SUPPORTED on non-mmap mode streams
      *                NOT_INITIALIZED in case of memory allocation error
-     *                INVALID_ARGUMENTS if the requested buffer size is too large
+     *                INVALID_ARGUMENTS if the requested buffer size is invalid
      *                INVALID_STATE if called out of sequence
      * @return info    a MmapBufferInfo struct containing information on the MMAP buffer created.
      */
@@ -300,13 +300,17 @@
 
     /**
      * Called by the framework to deinitialize the stream and free up
-     * all the currently allocated resources. It is recommended to close
+     * all currently allocated resources. It is recommended to close
      * the stream on the client side as soon as it becomes unused.
      *
+     * The client must ensure that this function is not called while
+     * audio data is being transferred through the stream's message queues.
+     *
      * @return retval OK in case of success.
      *                NOT_SUPPORTED if called on IStream instead of input or
      *                              output stream interface.
      *                INVALID_STATE if the stream was already closed.
      */
+    @exit
     close() generates (Result retval);
 };
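
A second call to close() on the same stream must report INVALID_STATE, which the closeTwice VTS test later in this change verifies; in test code the expectation reads (a sketch, using the ASSERT_OK / ASSERT_RESULT macros from the VTS suite below):

    ASSERT_OK(stream->close());                             // first close succeeds
    ASSERT_RESULT(Result::INVALID_STATE, stream->close());  // already closed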
diff --git a/audio/common/6.0/Android.bp b/audio/common/6.0/Android.bp
index 1a4e054..94f1cf8 100644
--- a/audio/common/6.0/Android.bp
+++ b/audio/common/6.0/Android.bp
@@ -12,6 +12,6 @@
     interfaces: [
         "android.hidl.safe_union@1.0",
     ],
-    gen_java: false,
+    gen_java: true,
     gen_java_constants: true,
 }
diff --git a/audio/core/all-versions/default/Android.bp b/audio/core/all-versions/default/Android.bp
index 007ad85..6f18d1d 100644
--- a/audio/core/all-versions/default/Android.bp
+++ b/audio/core/all-versions/default/Android.bp
@@ -19,6 +19,7 @@
     export_include_dirs: ["include"],
 
     shared_libs: [
+        "libaudiofoundation",
         "libbase",
         "libcutils",
         "libfmq",
diff --git a/audio/core/all-versions/default/Device.cpp b/audio/core/all-versions/default/Device.cpp
index 1a9df21..21dab00 100644
--- a/audio/core/all-versions/default/Device.cpp
+++ b/audio/core/all-versions/default/Device.cpp
@@ -39,11 +39,10 @@
 
 using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
 
-Device::Device(audio_hw_device_t* device) : mDevice(device) {}
+Device::Device(audio_hw_device_t* device) : mIsClosed(false), mDevice(device) {}
 
 Device::~Device() {
-    int status = audio_hw_device_close(mDevice);
-    ALOGW_IF(status, "Error closing audio hw device %p: %s", mDevice, strerror(-status));
+    (void)doClose();
     mDevice = nullptr;
 }
 
@@ -54,10 +53,14 @@
 
 void Device::closeInputStream(audio_stream_in_t* stream) {
     mDevice->close_input_stream(mDevice, stream);
+    LOG_ALWAYS_FATAL_IF(mOpenedStreamsCount == 0, "mOpenedStreamsCount is already 0");
+    --mOpenedStreamsCount;
 }
 
 void Device::closeOutputStream(audio_stream_out_t* stream) {
     mDevice->close_output_stream(mDevice, stream);
+    LOG_ALWAYS_FATAL_IF(mOpenedStreamsCount == 0, "mOpenedStreamsCount is already 0");
+    --mOpenedStreamsCount;
 }
 
 char* Device::halGetParameters(const char* keys) {
@@ -159,6 +162,7 @@
     sp<IStreamOut> streamOut;
     if (status == OK) {
         streamOut = new StreamOut(this, halStream);
+        ++mOpenedStreamsCount;
     }
     HidlUtils::audioConfigFromHal(halConfig, suggestedConfig);
     return {analyzeStatus("open_output_stream", status, {EINVAL} /*ignore*/), streamOut};
@@ -185,6 +189,7 @@
     sp<IStreamIn> streamIn;
     if (status == OK) {
         streamIn = new StreamIn(this, halStream);
+        ++mOpenedStreamsCount;
     }
     HidlUtils::audioConfigFromHal(halConfig, suggestedConfig);
     return {analyzeStatus("open_input_stream", status, {EINVAL} /*ignore*/), streamIn};
@@ -383,6 +388,18 @@
 }
 #endif
 
+Result Device::doClose() {
+    if (mIsClosed || mOpenedStreamsCount != 0) return Result::INVALID_STATE;
+    mIsClosed = true;
+    return analyzeStatus("close", audio_hw_device_close(mDevice));
+}
+
+#if MAJOR_VERSION >= 6
+Return<Result> Device::close() {
+    return doClose();
+}
+#endif
+
 }  // namespace implementation
 }  // namespace CPP_VERSION
 }  // namespace audio
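
doClose() is guarded both by mIsClosed and by the open-stream count maintained in openInputStream/openOutputStream and closeInputStream/closeOutputStream above. The invariant, distilled into a standalone sketch (class and method names hypothetical):

    #include <cassert>

    class DeviceCloseGuard {
        bool mClosed = false;
        int mOpenStreams = 0;

      public:
        void onStreamOpened() { ++mOpenStreams; }
        void onStreamClosed() {
            assert(mOpenStreams > 0 && "stream count underflow");
            --mOpenStreams;
        }
        // Mirrors Device::doClose(): refuses while any stream is still open,
        // and refuses a second close.
        bool tryClose() {
            if (mClosed || mOpenStreams != 0) return false;  // -> INVALID_STATE
            mClosed = true;
            return true;
        }
    };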
diff --git a/audio/core/all-versions/default/PrimaryDevice.cpp b/audio/core/all-versions/default/PrimaryDevice.cpp
index 99590b0..3cf0932 100644
--- a/audio/core/all-versions/default/PrimaryDevice.cpp
+++ b/audio/core/all-versions/default/PrimaryDevice.cpp
@@ -31,7 +31,11 @@
 
 PrimaryDevice::PrimaryDevice(audio_hw_device_t* device) : mDevice(new Device(device)) {}
 
-PrimaryDevice::~PrimaryDevice() {}
+PrimaryDevice::~PrimaryDevice() {
+    // Do not call mDevice->close here. If there are any unclosed streams,
+    // they hold a strong reference to the IDevice instance only, not to
+    // IPrimaryDevice, thus the IPrimaryDevice "part" of the device can be
+    // destroyed before the streams.
+}
 
 // Methods from ::android::hardware::audio::CPP_VERSION::IDevice follow.
 Return<Result> PrimaryDevice::initCheck() {
@@ -160,6 +164,11 @@
     return mDevice->setConnectedState(address, connected);
 }
 #endif
+#if MAJOR_VERSION >= 6
+Return<Result> PrimaryDevice::close() {
+    return mDevice->close();
+}
+#endif
 
 // Methods from ::android::hardware::audio::CPP_VERSION::IPrimaryDevice follow.
 Return<Result> PrimaryDevice::setVoiceVolume(float volume) {
diff --git a/audio/core/all-versions/default/Stream.cpp b/audio/core/all-versions/default/Stream.cpp
index 5f24a5d..74e5945 100644
--- a/audio/core/all-versions/default/Stream.cpp
+++ b/audio/core/all-versions/default/Stream.cpp
@@ -26,9 +26,8 @@
 #include <android/log.h>
 #include <hardware/audio.h>
 #include <hardware/audio_effect.h>
+#include <media/AudioContainers.h>
 #include <media/TypeConverter.h>
-#include <utils/SortedVector.h>
-#include <utils/Vector.h>
 
 namespace android {
 namespace hardware {
@@ -100,11 +99,11 @@
     Result result =
         getParam(AudioParameter::keyStreamSupportedSamplingRates, &halListValue, context);
     hidl_vec<uint32_t> sampleRates;
-    SortedVector<uint32_t> halSampleRates;
+    SampleRateSet halSampleRates;
     if (result == Result::OK) {
         halSampleRates =
             samplingRatesFromString(halListValue.string(), AudioParameter::valueListSeparator);
-        sampleRates.setToExternal(halSampleRates.editArray(), halSampleRates.size());
+        sampleRates = hidl_vec<uint32_t>(halSampleRates.begin(), halSampleRates.end());
         // Legacy get_parameter does not return a status_t, thus cannot report failures.
         // Note that this method must succeed (non-empty list) if the format is supported.
         if (sampleRates.size() == 0) {
@@ -126,13 +125,14 @@
     String8 halListValue;
     Result result = getParam(AudioParameter::keyStreamSupportedChannels, &halListValue, context);
     hidl_vec<AudioChannelBitfield> channelMasks;
-    SortedVector<audio_channel_mask_t> halChannelMasks;
+    ChannelMaskSet halChannelMasks;
     if (result == Result::OK) {
         halChannelMasks =
             channelMasksFromString(halListValue.string(), AudioParameter::valueListSeparator);
         channelMasks.resize(halChannelMasks.size());
-        for (size_t i = 0; i < halChannelMasks.size(); ++i) {
-            channelMasks[i] = AudioChannelBitfield(halChannelMasks[i]);
+        size_t i = 0;
+        for (auto channelMask : halChannelMasks) {
+            channelMasks[i++] = AudioChannelBitfield(channelMask);
         }
         // Legacy get_parameter does not return a status_t, thus cannot report failures.
         // Note that this method must succeed (non-empty list) if the format is supported.
@@ -168,7 +168,7 @@
     String8 halListValue;
     Result result = getParam(AudioParameter::keyStreamSupportedFormats, &halListValue);
     hidl_vec<AudioFormat> formats;
-    Vector<audio_format_t> halFormats;
+    FormatVector halFormats;
     if (result == Result::OK) {
         halFormats = formatsFromString(halListValue.string(), AudioParameter::valueListSeparator);
         formats.resize(halFormats.size());
diff --git a/audio/core/all-versions/default/StreamIn.cpp b/audio/core/all-versions/default/StreamIn.cpp
index d316f83..f1152ca 100644
--- a/audio/core/all-versions/default/StreamIn.cpp
+++ b/audio/core/all-versions/default/StreamIn.cpp
@@ -139,8 +139,7 @@
 }  // namespace
 
 StreamIn::StreamIn(const sp<Device>& device, audio_stream_in_t* stream)
-    : mIsClosed(false),
-      mDevice(device),
+    : mDevice(device),
       mStream(stream),
       mStreamCommon(new Stream(&stream->common)),
       mStreamMmap(new StreamMmap<audio_stream_in_t>(stream)),
@@ -159,7 +158,9 @@
         status_t status = EventFlag::deleteEventFlag(&mEfGroup);
         ALOGE_IF(status, "read MQ event flag deletion error: %s", strerror(-status));
     }
+#if MAJOR_VERSION <= 5
     mDevice->closeInputStream(mStream);
+#endif
     mStream = nullptr;
 }
 
@@ -303,14 +304,16 @@
 }
 
 Return<Result> StreamIn::close() {
-    if (mIsClosed) return Result::INVALID_STATE;
-    mIsClosed = true;
-    if (mReadThread.get()) {
-        mStopReadThread.store(true, std::memory_order_release);
+    if (mStopReadThread.load(std::memory_order_relaxed)) {  // only this thread writes
+        return Result::INVALID_STATE;
     }
+    mStopReadThread.store(true, std::memory_order_release);
     if (mEfGroup) {
         mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
     }
+#if MAJOR_VERSION >= 6
+    mDevice->closeInputStream(mStream);
+#endif
     return Result::OK;
 }
 
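With the mIsClosed member gone, the read thread's stop flag doubles as the "already closed" marker. The relaxed load is sufficient for the guard because only the thread executing close() ever stores to the flag (per the comment in the code), while the release store pairs with the acquire load in the I/O thread. The pattern, distilled into a sketch (names hypothetical):

    #include <atomic>

    class OneShotStop {
        std::atomic<bool> mStop{false};  // stored to only by the closing thread

      public:
        // Returns false on a repeated call (maps to Result::INVALID_STATE).
        bool requestStop() {
            if (mStop.load(std::memory_order_relaxed)) return false;
            mStop.store(true, std::memory_order_release);  // pairs with acquire below
            return true;
        }
        // Polled from the I/O thread loop.
        bool shouldStop() const { return mStop.load(std::memory_order_acquire); }
    };
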
diff --git a/audio/core/all-versions/default/StreamOut.cpp b/audio/core/all-versions/default/StreamOut.cpp
index 82cc408..396d354 100644
--- a/audio/core/all-versions/default/StreamOut.cpp
+++ b/audio/core/all-versions/default/StreamOut.cpp
@@ -138,8 +138,7 @@
 }  // namespace
 
 StreamOut::StreamOut(const sp<Device>& device, audio_stream_out_t* stream)
-    : mIsClosed(false),
-      mDevice(device),
+    : mDevice(device),
       mStream(stream),
       mStreamCommon(new Stream(&stream->common)),
       mStreamMmap(new StreamMmap<audio_stream_out_t>(stream)),
@@ -148,7 +147,7 @@
 
 StreamOut::~StreamOut() {
     ATRACE_CALL();
-    close();
+    (void)close();
     if (mWriteThread.get()) {
         ATRACE_NAME("mWriteThread->join");
         status_t status = mWriteThread->join();
@@ -159,10 +158,12 @@
         ALOGE_IF(status, "write MQ event flag deletion error: %s", strerror(-status));
     }
     mCallback.clear();
+#if MAJOR_VERSION <= 5
     mDevice->closeOutputStream(mStream);
     // Closing the output stream in the HAL waits for the callback to finish,
     // and joins the callback thread. Thus it is guaranteed that the callback
     // thread will not be accessing our object anymore.
+#endif
     mStream = nullptr;
 }
 
@@ -291,14 +292,16 @@
 #endif
 
 Return<Result> StreamOut::close() {
-    if (mIsClosed) return Result::INVALID_STATE;
-    mIsClosed = true;
-    if (mWriteThread.get()) {
-        mStopWriteThread.store(true, std::memory_order_release);
+    if (mStopWriteThread.load(std::memory_order_relaxed)) {  // only this thread writes
+        return Result::INVALID_STATE;
     }
+    mStopWriteThread.store(true, std::memory_order_release);
     if (mEfGroup) {
         mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
     }
+#if MAJOR_VERSION >= 6
+    mDevice->closeOutputStream(mStream);
+#endif
     return Result::OK;
 }
 
diff --git a/audio/core/all-versions/default/include/core/default/Device.h b/audio/core/all-versions/default/include/core/default/Device.h
index e64f00f..11ab607 100644
--- a/audio/core/all-versions/default/include/core/default/Device.h
+++ b/audio/core/all-versions/default/include/core/default/Device.h
@@ -114,6 +114,9 @@
     Return<void> getMicrophones(getMicrophones_cb _hidl_cb) override;
     Return<Result> setConnectedState(const DeviceAddress& address, bool connected) override;
 #endif
+#if MAJOR_VERSION >= 6
+    Return<Result> close() override;
+#endif
 
     Return<void> debug(const hidl_handle& fd, const hidl_vec<hidl_string>& options) override;
 
@@ -124,11 +127,15 @@
     void closeOutputStream(audio_stream_out_t* stream);
     audio_hw_device_t* device() const { return mDevice; }
 
-   private:
+  private:
+    bool mIsClosed;
     audio_hw_device_t* mDevice;
+    int mOpenedStreamsCount = 0;
 
     virtual ~Device();
 
+    Result doClose();
+
     // Methods from ParametersUtil.
     char* halGetParameters(const char* keys) override;
     int halSetParameters(const char* keysAndValues) override;
diff --git a/audio/core/all-versions/default/include/core/default/PrimaryDevice.h b/audio/core/all-versions/default/include/core/default/PrimaryDevice.h
index 9d69cb0..f5f3848 100644
--- a/audio/core/all-versions/default/include/core/default/PrimaryDevice.h
+++ b/audio/core/all-versions/default/include/core/default/PrimaryDevice.h
@@ -96,6 +96,9 @@
     Return<void> getMicrophones(getMicrophones_cb _hidl_cb) override;
     Return<Result> setConnectedState(const DeviceAddress& address, bool connected) override;
 #endif
+#if MAJOR_VERSION >= 6
+    Return<Result> close() override;
+#endif
 
     Return<void> debug(const hidl_handle& fd, const hidl_vec<hidl_string>& options) override;
 
diff --git a/audio/core/all-versions/default/include/core/default/StreamIn.h b/audio/core/all-versions/default/include/core/default/StreamIn.h
index 6209b8f..24f9944 100644
--- a/audio/core/all-versions/default/include/core/default/StreamIn.h
+++ b/audio/core/all-versions/default/include/core/default/StreamIn.h
@@ -120,7 +120,6 @@
                                          uint64_t* time);
 
    private:
-    bool mIsClosed;
     const sp<Device> mDevice;
     audio_stream_in_t* mStream;
     const sp<Stream> mStreamCommon;
diff --git a/audio/core/all-versions/default/include/core/default/StreamOut.h b/audio/core/all-versions/default/include/core/default/StreamOut.h
index b098005..6334785 100644
--- a/audio/core/all-versions/default/include/core/default/StreamOut.h
+++ b/audio/core/all-versions/default/include/core/default/StreamOut.h
@@ -126,7 +126,6 @@
                                               TimeSpec* timeStamp);
 
    private:
-    bool mIsClosed;
     const sp<Device> mDevice;
     audio_stream_out_t* mStream;
     const sp<Stream> mStreamCommon;
diff --git a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
index e267a5e..709b7cd 100644
--- a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
@@ -22,18 +22,16 @@
         GTEST_SKIP() << "No primary device on this factory";  // returns
     }
 
-    struct WaitExecutor {
-        ~WaitExecutor() { DeviceManager::waitForInstanceDestruction(); }
-    } waitExecutor;  // Make sure we wait for the device destruction on exiting from the test.
-    Result result;
-    sp<IDevice> baseDevice;
-    ASSERT_OK(getDevicesFactory()->openDevice("primary", returnIn(result, baseDevice)));
-    ASSERT_OK(result);
-    ASSERT_TRUE(baseDevice != nullptr);
-
-    Return<sp<IPrimaryDevice>> primaryDevice = IPrimaryDevice::castFrom(baseDevice);
-    ASSERT_TRUE(primaryDevice.isOk());
-    ASSERT_TRUE(sp<IPrimaryDevice>(primaryDevice) != nullptr);
+    {  // Scope for device SPs
+        sp<IDevice> baseDevice =
+                DeviceManager::getInstance().get(getFactoryName(), DeviceManager::kPrimaryDevice);
+        ASSERT_TRUE(baseDevice != nullptr);
+        Return<sp<IPrimaryDevice>> primaryDevice = IPrimaryDevice::castFrom(baseDevice);
+        EXPECT_TRUE(primaryDevice.isOk());
+        EXPECT_TRUE(sp<IPrimaryDevice>(primaryDevice) != nullptr);
+    }
+    EXPECT_TRUE(
+            DeviceManager::getInstance().reset(getFactoryName(), DeviceManager::kPrimaryDevice));
 }
 
 //////////////////////////////////////////////////////////////////////////////
@@ -54,7 +52,6 @@
         doc::test(
             "Make sure getMicrophones always succeeds"
             "and getActiveMicrophones always succeeds when recording from these microphones.");
-        AudioIoHandle ioHandle = (AudioIoHandle)AudioHandleConsts::AUDIO_IO_HANDLE_NONE;
         AudioConfig config{};
         config.channelMask = mkEnumBitfield(AudioChannelMask::IN_MONO);
         config.sampleRateHz = 8000;
@@ -67,18 +64,14 @@
                 continue;
             }
             sp<IStreamIn> stream;
+            StreamHelper<IStreamIn> helper(stream);
             AudioConfig suggestedConfig{};
-            ASSERT_OK(getDevice()->openInputStream(ioHandle, microphone.deviceAddress, config,
-                                                   flags, initMetadata,
-                                                   returnIn(res, stream, suggestedConfig)));
-            if (res != Result::OK) {
-                ASSERT_TRUE(stream == nullptr);
-                AudioConfig suggestedConfigRetry{};
-                ASSERT_OK(getDevice()->openInputStream(
-                        ioHandle, microphone.deviceAddress, suggestedConfig, flags, initMetadata,
-                        returnIn(res, stream, suggestedConfigRetry)));
-            }
-            ASSERT_OK(res);
+            ASSERT_NO_FATAL_FAILURE(helper.open(
+                    [&](AudioIoHandle handle, AudioConfig config, auto cb) {
+                        return getDevice()->openInputStream(handle, microphone.deviceAddress,
+                                                            config, flags, initMetadata, cb);
+                    },
+                    config, &res, &suggestedConfig));
             hidl_vec<MicrophoneInfo> activeMicrophones;
             Result readRes;
             typedef MessageQueue<IStreamIn::ReadParameters, kSynchronizedReadWrite> CommandMQ;
@@ -112,11 +105,8 @@
                 ASSERT_OK(res);
                 ASSERT_NE(0U, activeMicrophones.size());
             }
-            stream->close();
-            // Workaround for b/139329877. Ensures the stream gets closed on the audio hal side.
-            stream.clear();
-            IPCThreadState::self()->flushCommands();
-            usleep(1000);
+            helper.close(true /*clear*/, &res);
+            ASSERT_OK(res);
             if (efGroup) {
                 EventFlag::deleteEventFlag(&efGroup);
             }
diff --git a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
index 30f8a7a..22e60be 100644
--- a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
@@ -100,7 +100,8 @@
                             special = true;
                         }
                         if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) &&
-                            !(flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC)) {
+                            !(flags &
+                              (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ))) {
                             result.emplace_back(device, config,
                                                 AudioOutputFlag(AUDIO_OUTPUT_FLAG_DIRECT));
                             special = true;
@@ -144,3 +145,49 @@
     }();
     return parameters;
 }
+
+TEST_P(AudioHidlDeviceTest, CloseDeviceWithOpenedOutputStreams) {
+    doc::test("Verify that a device can't be closed if there are streams opened");
+    DeviceAddress address{.device = AudioDevice::OUT_DEFAULT};
+    AudioConfig config{};
+    auto flags = hidl_bitfield<AudioOutputFlag>(AudioOutputFlag::NONE);
+    SourceMetadata initMetadata = {{{AudioUsage::MEDIA, AudioContentType::MUSIC, 1 /* gain */}}};
+    sp<IStreamOut> stream;
+    StreamHelper<IStreamOut> helper(stream);
+    AudioConfig suggestedConfig{};
+    ASSERT_NO_FATAL_FAILURE(helper.open(
+            [&](AudioIoHandle handle, AudioConfig config, auto cb) {
+                return getDevice()->openOutputStream(handle, address, config, flags, initMetadata,
+                                                     cb);
+            },
+            config, &res, &suggestedConfig));
+    ASSERT_RESULT(Result::INVALID_STATE, getDevice()->close());
+    ASSERT_NO_FATAL_FAILURE(helper.close(true /*clear*/, &res));
+    ASSERT_OK(getDevice()->close());
+    ASSERT_TRUE(resetDevice());
+}
+
+TEST_P(AudioHidlDeviceTest, CloseDeviceWithOpenedInputStreams) {
+    doc::test("Verify that a device can't be closed if there are streams opened");
+    auto module = getCachedPolicyConfig().getModuleFromName(getDeviceName());
+    if (module->getInputProfiles().empty()) {
+        GTEST_SKIP() << "Device doesn't have input profiles";
+    }
+    DeviceAddress address{.device = AudioDevice::IN_DEFAULT};
+    AudioConfig config{};
+    auto flags = hidl_bitfield<AudioInputFlag>(AudioInputFlag::NONE);
+    SinkMetadata initMetadata = {{{.source = AudioSource::MIC, .gain = 1}}};
+    sp<IStreamIn> stream;
+    StreamHelper<IStreamIn> helper(stream);
+    AudioConfig suggestedConfig{};
+    ASSERT_NO_FATAL_FAILURE(helper.open(
+            [&](AudioIoHandle handle, AudioConfig config, auto cb) {
+                return getDevice()->openInputStream(handle, address, config, flags, initMetadata,
+                                                    cb);
+            },
+            config, &res, &suggestedConfig));
+    ASSERT_RESULT(Result::INVALID_STATE, getDevice()->close());
+    ASSERT_NO_FATAL_FAILURE(helper.close(true /*clear*/, &res));
+    ASSERT_OK(getDevice()->close());
+    ASSERT_TRUE(resetDevice());
+}
diff --git a/audio/core/all-versions/vts/functional/Android.bp b/audio/core/all-versions/vts/functional/Android.bp
index 3286ecb..73af7f4 100644
--- a/audio/core/all-versions/vts/functional/Android.bp
+++ b/audio/core/all-versions/vts/functional/Android.bp
@@ -24,6 +24,7 @@
         "libxml2",
     ],
     shared_libs: [
+        "libaudiofoundation",
         "libfmq",
     ],
     header_libs: [
diff --git a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
index e76e24e..d0d39e8 100644
--- a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
+++ b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
@@ -718,12 +718,6 @@
                 ::testing::Values(AudioInputFlag::NONE)),
         &DeviceConfigParameterToString);
 INSTANTIATE_TEST_CASE_P(
-        SupportedInputBufferSize, RequiredInputBufferSizeTest,
-        ::testing::Combine(::testing::ValuesIn(getDeviceParameters()),
-                           ::testing::ValuesIn(ConfigHelper::getSupportedCaptureAudioConfig()),
-                           ::testing::Values(AudioInputFlag::NONE)),
-        &DeviceConfigParameterToString);
-INSTANTIATE_TEST_CASE_P(
         RecommendedCaptureAudioConfigSupport, OptionalInputBufferSizeTest,
         ::testing::Combine(
                 ::testing::ValuesIn(getDeviceParametersForPrimaryDeviceTests()),
@@ -815,60 +809,84 @@
 ////////////////////////// open{Output,Input}Stream //////////////////////////
 //////////////////////////////////////////////////////////////////////////////
 
+// This class is also used by some device tests.
 template <class Stream>
-class OpenStreamTest : public AudioHidlTestWithDeviceConfigParameter {
-  protected:
+class StreamHelper {
+  public:
+    // StreamHelper doesn't own the stream; this keeps stream lifetime management simple.
+    explicit StreamHelper(sp<Stream>& stream) : mStream(stream) {}
     template <class Open>
-    void testOpen(Open openStream, const AudioConfig& config) {
+    void open(Open openStream, const AudioConfig& config, Result* res,
+              AudioConfig* suggestedConfigPtr) {
         // FIXME: Open a stream without an IOHandle
         //        This is not required to be accepted by hal implementations
         AudioIoHandle ioHandle = (AudioIoHandle)AudioHandleConsts::AUDIO_IO_HANDLE_NONE;
         AudioConfig suggestedConfig{};
-        ASSERT_OK(openStream(ioHandle, config, returnIn(res, stream, suggestedConfig)));
-
-        // TODO: only allow failure for RecommendedPlaybackAudioConfig
-        switch (res) {
+        bool retryWithSuggestedConfig = true;
+        if (suggestedConfigPtr == nullptr) {
+            suggestedConfigPtr = &suggestedConfig;
+            retryWithSuggestedConfig = false;
+        }
+        ASSERT_OK(openStream(ioHandle, config, returnIn(*res, mStream, *suggestedConfigPtr)));
+        switch (*res) {
             case Result::OK:
-                ASSERT_TRUE(stream != nullptr);
-                audioConfig = config;
+                ASSERT_TRUE(mStream != nullptr);
+                *suggestedConfigPtr = config;
                 break;
             case Result::INVALID_ARGUMENTS:
-                ASSERT_TRUE(stream == nullptr);
-                AudioConfig suggestedConfigRetry;
-                // Could not open stream with config, try again with the
-                // suggested one
-                ASSERT_OK(openStream(ioHandle, suggestedConfig,
-                                     returnIn(res, stream, suggestedConfigRetry)));
-                // This time it must succeed
-                ASSERT_OK(res);
-                ASSERT_TRUE(stream != nullptr);
-                audioConfig = suggestedConfig;
+                ASSERT_TRUE(mStream == nullptr);
+                if (retryWithSuggestedConfig) {
+                    AudioConfig suggestedConfigRetry;
+                    ASSERT_OK(openStream(ioHandle, *suggestedConfigPtr,
+                                         returnIn(*res, mStream, suggestedConfigRetry)));
+                    ASSERT_OK(*res);
+                    ASSERT_TRUE(mStream != nullptr);
+                }
                 break;
             default:
-                FAIL() << "Invalid return status: " << ::testing::PrintToString(res);
+                FAIL() << "Invalid return status: " << ::testing::PrintToString(*res);
         }
+    }
+    void close(bool clear, Result* res) {
+        auto ret = mStream->close();
+        EXPECT_TRUE(ret.isOk());
+        *res = ret;
+        if (clear) {
+            mStream.clear();
+#if MAJOR_VERSION <= 5
+            // FIXME: there is no way to know when the remote IStream is being destroyed
+            //        Binder does not support testing if an object is alive, thus
+            //        wait for 100ms to let the binder destruction propagate and
+            //        give the remote device time to be destroyed.
+            //        flushCommands makes sure all local commands are sent, thus should
+            //        reduce the latency between local and remote destruction.
+            IPCThreadState::self()->flushCommands();
+            usleep(100 * 1000);
+#endif
+        }
+    }
+
+  private:
+    sp<Stream>& mStream;
+};
+
+template <class Stream>
+class OpenStreamTest : public AudioHidlTestWithDeviceConfigParameter {
+  protected:
+    OpenStreamTest() : AudioHidlTestWithDeviceConfigParameter(), helper(stream) {}
+    template <class Open>
+    void testOpen(Open openStream, const AudioConfig& config) {
+        // TODO: only allow failure for RecommendedPlaybackAudioConfig
+        ASSERT_NO_FATAL_FAILURE(helper.open(openStream, config, &res, &audioConfig));
         open = true;
     }
 
-    Return<Result> closeStream() {
+    Result closeStream(bool clear = true) {
         open = false;
-        auto res = stream->close();
-        stream.clear();
-        waitForStreamDestruction();
+        helper.close(clear, &res);
         return res;
     }
 
-    static void waitForStreamDestruction() {
-        // FIXME: there is no way to know when the remote IStream is being destroyed
-        //        Binder does not support testing if an object is alive, thus
-        //        wait for 100ms to let the binder destruction propagates and
-        //        the remote device has the time to be destroyed.
-        //        flushCommand makes sure all local command are sent, thus should reduce
-        //        the latency between local and remote destruction.
-        IPCThreadState::self()->flushCommands();
-        usleep(100 * 1000);
-    }
-
   private:
     void TearDown() override {
         if (open) {
@@ -881,6 +899,7 @@
     AudioConfig audioConfig;
     DeviceAddress address = {};
     sp<Stream> stream;
+    StreamHelper<Stream> helper;
     bool open = false;
 };
 
@@ -929,12 +948,6 @@
                 ::testing::Values(AudioOutputFlag::NONE)),
         &DeviceConfigParameterToString);
 INSTANTIATE_TEST_CASE_P(
-        SupportedOutputStreamConfig, OutputStreamTest,
-        ::testing::Combine(::testing::ValuesIn(getDeviceParameters()),
-                           ::testing::ValuesIn(ConfigHelper::getSupportedPlaybackAudioConfig()),
-                           ::testing::Values(AudioOutputFlag::NONE)),
-        &DeviceConfigParameterToString);
-INSTANTIATE_TEST_CASE_P(
         RecommendedOutputStreamConfigSupport, OutputStreamTest,
         ::testing::Combine(
                 ::testing::ValuesIn(getDeviceParametersForPrimaryDeviceTests()),
@@ -944,7 +957,7 @@
 #elif MAJOR_VERSION >= 6
 // For V6 and above test according to the audio policy manager configuration.
 // This is more correct as CDD is written from the apps perspective.
-// Audio system provides necessary format conversions for the missing configurations.
+// Audio system provides necessary format conversions for missing configurations.
 INSTANTIATE_TEST_CASE_P(DeclaredOutputStreamConfigSupport, OutputStreamTest,
                         ::testing::ValuesIn(getOutputDeviceConfigParameters()),
                         &DeviceConfigParameterToString);
@@ -990,12 +1003,6 @@
                 ::testing::Values(AudioInputFlag::NONE)),
         &DeviceConfigParameterToString);
 INSTANTIATE_TEST_CASE_P(
-        SupportedInputStreamConfig, InputStreamTest,
-        ::testing::Combine(::testing::ValuesIn(getDeviceParameters()),
-                           ::testing::ValuesIn(ConfigHelper::getSupportedCaptureAudioConfig()),
-                           ::testing::Values(AudioInputFlag::NONE)),
-        &DeviceConfigParameterToString);
-INSTANTIATE_TEST_CASE_P(
         RecommendedInputStreamConfigSupport, InputStreamTest,
         ::testing::Combine(
                 ::testing::ValuesIn(getDeviceParametersForPrimaryDeviceTests()),
@@ -1005,7 +1012,7 @@
 #elif MAJOR_VERSION >= 6
 // For V6 and above test according to the audio policy manager configuration.
 // This is more correct as CDD is written from the apps perspective.
-// Audio system provides necessary format conversions for the missing configurations.
+// Audio system provides necessary format conversions for missing configurations.
 INSTANTIATE_TEST_CASE_P(DeclaredInputStreamConfigSupport, InputStreamTest,
                         ::testing::ValuesIn(getInputDeviceConfigParameters()),
                         &DeviceConfigParameterToString);
@@ -1194,26 +1201,21 @@
 TEST_IO_STREAM(close, "Make sure a stream can be closed", ASSERT_OK(closeStream()))
 // clang-format off
 TEST_IO_STREAM(closeTwice, "Make sure a stream can not be closed twice",
-        auto streamCopy = stream;
-        ASSERT_OK(closeStream());
-        ASSERT_RESULT(Result::INVALID_STATE, streamCopy->close());
-        streamCopy.clear();
-        waitForStreamDestruction())
+        ASSERT_OK(closeStream(false /*clear*/));
+        ASSERT_EQ(Result::INVALID_STATE, closeStream()))
 // clang-format on
 
-static void testCreateTooBigMmapBuffer(IStream* stream) {
-    MmapBufferInfo info;
-    Result res;
-    // Assume that int max is a value too big to be allocated
-    // This is true currently with a 32bit media server, but might not when it
-    // will run in 64 bit
-    auto minSizeFrames = std::numeric_limits<int32_t>::max();
-    ASSERT_OK(stream->createMmapBuffer(minSizeFrames, returnIn(res, info)));
-    ASSERT_RESULT(invalidArgsOrNotSupported, res);
+static void testMmapBufferOfInvalidSize(IStream* stream) {
+    for (int32_t value : {-1, 0, std::numeric_limits<int32_t>::max()}) {
+        MmapBufferInfo info;
+        Result res;
+        EXPECT_OK(stream->createMmapBuffer(value, returnIn(res, info)));
+        EXPECT_RESULT(invalidArgsOrNotSupported, res) << "value=" << value;
+    }
 }
 
-TEST_IO_STREAM(CreateTooBigMmapBuffer, "Create mmap buffer too big should fail",
-               testCreateTooBigMmapBuffer(stream.get()))
+TEST_IO_STREAM(CreateTooBigMmapBuffer, "Create mmap buffer of invalid size must fail",
+               testMmapBufferOfInvalidSize(stream.get()))
 
 static void testGetMmapPositionOfNonMmapedStream(IStream* stream) {
     Result res;
diff --git a/audio/core/all-versions/vts/functional/ConfigHelper.h b/audio/core/all-versions/vts/functional/ConfigHelper.h
index 48aae8c..a2f4116 100644
--- a/audio/core/all-versions/vts/functional/ConfigHelper.h
+++ b/audio/core/all-versions/vts/functional/ConfigHelper.h
@@ -57,12 +57,6 @@
                                   {24000, 48000}, {AudioFormat::PCM_16_BIT});
     }
 
-    static const vector<AudioConfig> getSupportedPlaybackAudioConfig() {
-        // TODO: retrieve audio config supported by the platform
-        // as declared in the policy configuration
-        return {};
-    }
-
     static const vector<AudioConfig> getRequiredSupportCaptureAudioConfig() {
         if (!primaryHasMic()) return {};
         return combineAudioConfig({AudioChannelMask::IN_MONO}, {8000, 11025, 16000, 44100},
@@ -73,11 +67,6 @@
         return combineAudioConfig({AudioChannelMask::IN_STEREO}, {22050, 48000},
                                   {AudioFormat::PCM_16_BIT});
     }
-    static const vector<AudioConfig> getSupportedCaptureAudioConfig() {
-        // TODO: retrieve audio config supported by the platform
-        // as declared in the policy configuration
-        return {};
-    }
 
     static vector<AudioConfig> combineAudioConfig(vector<audio_channel_mask_t> channelMasks,
                                                   vector<uint32_t> sampleRates,
diff --git a/audio/core/all-versions/vts/functional/DeviceManager.h b/audio/core/all-versions/vts/functional/DeviceManager.h
index b6e2db0..d849f85 100644
--- a/audio/core/all-versions/vts/functional/DeviceManager.h
+++ b/audio/core/all-versions/vts/functional/DeviceManager.h
@@ -22,25 +22,33 @@
 template <class Derived, class Key, class Interface>
 class InterfaceManager {
   public:
+    sp<Interface> getExisting(const Key& name) {
+        auto existing = instances.find(name);
+        return existing != instances.end() ? existing->second : sp<Interface>();
+    }
+
     sp<Interface> get(const Key& name) {
         auto existing = instances.find(name);
         if (existing != instances.end()) return existing->second;
         auto [inserted, _] = instances.emplace(name, Derived::createInterfaceInstance(name));
         if (inserted->second) {
-            environment->registerTearDown([name]() { (void)Derived::getInstance().reset(name); });
+            environment->registerTearDown(
+                    [name]() { (void)Derived::getInstance().reset(name, false); });
         }
         return inserted->second;
     }
 
     // The test must check that reset was successful. Reset failure means that the test code
     // is holding a strong reference to the device.
-    bool reset(const Key& name) __attribute__((warn_unused_result)) {
+    bool reset(const Key& name, bool waitForDestruction) __attribute__((warn_unused_result)) {
         auto iter = instances.find(name);
         if (iter == instances.end()) return true;
         ::android::wp<Interface> weak = iter->second;
         instances.erase(iter);
         if (weak.promote() != nullptr) return false;
-        waitForInstanceDestruction();
+        if (waitForDestruction) {
+            waitForInstanceDestruction();
+        }
         return true;
     }
 
@@ -100,7 +108,15 @@
     }
     bool reset(const std::string& factoryName, const std::string& name)
             __attribute__((warn_unused_result)) {
-        return InterfaceManager::reset(std::make_tuple(factoryName, name));
+#if MAJOR_VERSION <= 5
+        return InterfaceManager::reset(std::make_tuple(factoryName, name), true);
+#elif MAJOR_VERSION >= 6
+        {
+            sp<IDevice> device = getExisting(std::make_tuple(factoryName, name));
+            if (device != nullptr) device->close();
+        }
+        return InterfaceManager::reset(std::make_tuple(factoryName, name), false);
+#endif
     }
     bool resetPrimary(const std::string& factoryName) __attribute__((warn_unused_result)) {
         return reset(factoryName, kPrimaryDevice);
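
Under V6 the explicit IDevice::close() replaces the destruction wait: reset() first asks the device to close itself, so there is no need to sleep for binder object destruction. In a test, the shutdown sequence (as in the new V6 VTS cases above) is simply:

    // Close the device explicitly, then drop the last strong reference.
    ASSERT_OK(getDevice()->close());
    ASSERT_TRUE(resetDevice());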
diff --git a/audio/effect/6.0/IEffect.hal b/audio/effect/6.0/IEffect.hal
index b35afee..f4c50a2 100644
--- a/audio/effect/6.0/IEffect.hal
+++ b/audio/effect/6.0/IEffect.hal
@@ -407,9 +407,12 @@
 
     /**
      * Called by the framework to deinitialize the effect and free up
-     * all the currently allocated resources. It is recommended to close
+     * all currently allocated resources. It is recommended to close
      * the effect on the client side as soon as it becomes unused.
      *
+     * The client must ensure that this function is not called while
+     * audio data is being transferred through the effect's message queues.
+     *
      * @return retval OK in case of success.
      *                INVALID_STATE if the effect was already closed.
      */
diff --git a/audio/effect/all-versions/default/Effect.cpp b/audio/effect/all-versions/default/Effect.cpp
index 3c0d878..0afa779 100644
--- a/audio/effect/all-versions/default/Effect.cpp
+++ b/audio/effect/all-versions/default/Effect.cpp
@@ -138,11 +138,11 @@
 const char* Effect::sContextCallFunction = sContextCallToCommand;
 
 Effect::Effect(effect_handle_t handle)
-    : mIsClosed(false), mHandle(handle), mEfGroup(nullptr), mStopProcessThread(false) {}
+    : mHandle(handle), mEfGroup(nullptr), mStopProcessThread(false) {}
 
 Effect::~Effect() {
     ATRACE_CALL();
-    close();
+    (void)close();
     if (mProcessThread.get()) {
         ATRACE_NAME("mProcessThread->join");
         status_t status = mProcessThread->join();
@@ -154,8 +154,10 @@
     }
     mInBuffer.clear();
     mOutBuffer.clear();
+#if MAJOR_VERSION <= 5
     int status = EffectRelease(mHandle);
     ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
+#endif
     EffectMap::getInstance().remove(mHandle);
     mHandle = 0;
 }
@@ -699,15 +701,20 @@
 }
 
 Return<Result> Effect::close() {
-    if (mIsClosed) return Result::INVALID_STATE;
-    mIsClosed = true;
-    if (mProcessThread.get()) {
-        mStopProcessThread.store(true, std::memory_order_release);
+    if (mStopProcessThread.load(std::memory_order_relaxed)) {  // only this thread modifies
+        return Result::INVALID_STATE;
     }
+    mStopProcessThread.store(true, std::memory_order_release);
     if (mEfGroup) {
         mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_QUIT));
     }
+#if MAJOR_VERSION <= 5
     return Result::OK;
+#elif MAJOR_VERSION >= 6
+    // No need to join the processing thread: it is part of the API contract that the client
+    // must finish processing before closing the effect.
+    return analyzeStatus("EffectRelease", "", sContextCallFunction, EffectRelease(mHandle));
+#endif
 }
 
 Return<void> Effect::debug(const hidl_handle& fd, const hidl_vec<hidl_string>& /* options */) {
diff --git a/audio/effect/all-versions/default/Effect.h b/audio/effect/all-versions/default/Effect.h
index 3d99a0e..181e542 100644
--- a/audio/effect/all-versions/default/Effect.h
+++ b/audio/effect/all-versions/default/Effect.h
@@ -170,7 +170,6 @@
     static const char* sContextCallToCommand;
     static const char* sContextCallFunction;
 
-    bool mIsClosed;
     effect_handle_t mHandle;
     sp<AudioBufferWrapper> mInBuffer;
     sp<AudioBufferWrapper> mOutBuffer;
diff --git a/biometrics/face/1.0/vts/functional/Android.bp b/biometrics/face/1.0/vts/functional/Android.bp
index fa68c4e..f2598a7 100644
--- a/biometrics/face/1.0/vts/functional/Android.bp
+++ b/biometrics/face/1.0/vts/functional/Android.bp
@@ -19,6 +19,6 @@
     defaults: ["VtsHalTargetTestDefaults"],
     srcs: ["VtsHalBiometricsFaceV1_0TargetTest.cpp"],
     static_libs: ["android.hardware.biometrics.face@1.0"],
-    test_suites: ["general-tests"],
+    test_suites: ["general-tests", "vts-core"],
 }
 
diff --git a/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp b/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
index a4e95ed..7ac44a4 100644
--- a/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
+++ b/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
@@ -20,9 +20,10 @@
 #include <android/hardware/biometrics/face/1.0/IBiometricsFaceClientCallback.h>
 
 #include <VtsHalHidlTargetCallbackBase.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
 #include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
 
 #include <chrono>
 #include <cstdint>
@@ -124,27 +125,11 @@
     }
 };
 
-// Test environment for the BiometricsFace HAL.
-class FaceHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
-  public:
-    // Get the test environment singleton.
-    static FaceHidlEnvironment* Instance() {
-        static FaceHidlEnvironment* instance = new FaceHidlEnvironment;
-        return instance;
-    }
-
-    void registerTestServices() override { registerTestService<IBiometricsFace>(); }
-
-  private:
-    FaceHidlEnvironment() = default;
-};
-
 // Test class for the BiometricsFace HAL.
-class FaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class FaceHidlTest : public ::testing::TestWithParam<std::string> {
   public:
     void SetUp() override {
-        mService = ::testing::VtsHalHidlTargetTestBase::getService<IBiometricsFace>(
-                FaceHidlEnvironment::Instance()->getServiceName<IBiometricsFace>());
+        mService = IBiometricsFace::getService(GetParam());
         ASSERT_NE(mService, nullptr);
         mCallback = new FaceCallback();
         mCallback->SetWaitTimeoutDefault(kTimeout);
@@ -167,7 +152,7 @@
 
 // generateChallenge should always return a unique, cryptographically secure,
 // non-zero number.
-TEST_F(FaceHidlTest, GenerateChallengeTest) {
+TEST_P(FaceHidlTest, GenerateChallengeTest) {
     std::map<uint64_t, int> m;
     for (int i = 0; i < kGenerateChallengeIterations; ++i) {
         Return<void> ret =
@@ -182,7 +167,7 @@
 }
 
 // enroll with an invalid (all zeroes) HAT should fail.
-TEST_F(FaceHidlTest, EnrollZeroHatTest) {
+TEST_P(FaceHidlTest, EnrollZeroHatTest) {
     // Filling HAT with zeros
     hidl_vec<uint8_t> token(69);
     for (size_t i = 0; i < 69; i++) {
@@ -200,7 +185,7 @@
 }
 
 // enroll with an invalid HAT should fail.
-TEST_F(FaceHidlTest, EnrollGarbageHatTest) {
+TEST_P(FaceHidlTest, EnrollGarbageHatTest) {
     // Filling HAT with pseudorandom invalid data.
     // Using default seed to make the test reproducible.
     std::mt19937 gen(std::mt19937::default_seed);
@@ -221,7 +206,7 @@
 }
 
 // setFeature with an invalid (all zeros) HAT should fail.
-TEST_F(FaceHidlTest, SetFeatureZeroHatTest) {
+TEST_P(FaceHidlTest, SetFeatureZeroHatTest) {
     hidl_vec<uint8_t> token(69);
     for (size_t i = 0; i < 69; i++) {
         token[i] = 0;
@@ -232,7 +217,7 @@
 }
 
 // setFeature with an invalid HAT should fail.
-TEST_F(FaceHidlTest, SetFeatureGarbageHatTest) {
+TEST_P(FaceHidlTest, SetFeatureGarbageHatTest) {
     // Filling HAT with pseudorandom invalid data.
     // Using default seed to make the test reproducible.
     std::mt19937 gen(std::mt19937::default_seed);
@@ -254,16 +239,16 @@
     ASSERT_TRUE(res.isOk());
 }
 
-TEST_F(FaceHidlTest, GetFeatureRequireAttentionTest) {
+TEST_P(FaceHidlTest, GetFeatureRequireAttentionTest) {
     assertGetFeatureFails(mService, 0 /* faceId */, Feature::REQUIRE_ATTENTION);
 }
 
-TEST_F(FaceHidlTest, GetFeatureRequireDiversityTest) {
+TEST_P(FaceHidlTest, GetFeatureRequireDiversityTest) {
     assertGetFeatureFails(mService, 0 /* faceId */, Feature::REQUIRE_DIVERSITY);
 }
 
 // revokeChallenge should always return within the timeout
-TEST_F(FaceHidlTest, RevokeChallengeTest) {
+TEST_P(FaceHidlTest, RevokeChallengeTest) {
     auto start = std::chrono::system_clock::now();
     Return<Status> ret = mService->revokeChallenge();
     auto elapsed = std::chrono::system_clock::now() - start;
@@ -272,14 +257,14 @@
 }
 
 // The call to getAuthenticatorId should succeed.
-TEST_F(FaceHidlTest, GetAuthenticatorIdTest) {
+TEST_P(FaceHidlTest, GetAuthenticatorIdTest) {
     Return<void> ret = mService->getAuthenticatorId(
             [](const OptionalUint64& res) { ASSERT_EQ(Status::OK, res.status); });
     ASSERT_TRUE(ret.isOk());
 }
 
 // The call to enumerate should succeed.
-TEST_F(FaceHidlTest, EnumerateTest) {
+TEST_P(FaceHidlTest, EnumerateTest) {
     Return<Status> ret = mService->enumerate();
     ASSERT_EQ(Status::OK, static_cast<Status>(ret));
     auto res = mCallback->WaitForCallback(kCallbackNameOnEnumerate);
@@ -288,21 +273,21 @@
 }
 
 // The call to remove should succeed for any faceId
-TEST_F(FaceHidlTest, RemoveFaceTest) {
+TEST_P(FaceHidlTest, RemoveFaceTest) {
     // Remove a face
     Return<Status> ret = mService->remove(kFaceId);
     ASSERT_EQ(Status::OK, static_cast<Status>(ret));
 }
 
 // Remove should accept 0 to delete all faces
-TEST_F(FaceHidlTest, RemoveAllFacesTest) {
+TEST_P(FaceHidlTest, RemoveAllFacesTest) {
     // Remove all faces
     Return<Status> ret = mService->remove(0);
     ASSERT_EQ(Status::OK, static_cast<Status>(ret));
 }
 
 // Active user should successfully set to a writable location.
-TEST_F(FaceHidlTest, SetActiveUserTest) {
+TEST_P(FaceHidlTest, SetActiveUserTest) {
     // Create an active user
     Return<Status> ret = mService->setActiveUser(2, kFacedataDir);
     ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -313,7 +298,7 @@
 }
 
 // Active user should fail to set to an unwritable location.
-TEST_F(FaceHidlTest, SetActiveUserUnwritableTest) {
+TEST_P(FaceHidlTest, SetActiveUserUnwritableTest) {
     // Create an active user to an unwritable location (device root dir)
     Return<Status> ret = mService->setActiveUser(3, "/");
     ASSERT_NE(Status::OK, static_cast<Status>(ret));
@@ -324,7 +309,7 @@
 }
 
 // Active user should fail to set to a null location.
-TEST_F(FaceHidlTest, SetActiveUserNullTest) {
+TEST_P(FaceHidlTest, SetActiveUserNullTest) {
     // Create an active user to a null location.
     Return<Status> ret = mService->setActiveUser(4, nullptr);
     ASSERT_NE(Status::OK, static_cast<Status>(ret));
@@ -336,7 +321,7 @@
 
 // Cancel should always return CANCELED from any starting state including
 // the IDLE state.
-TEST_F(FaceHidlTest, CancelTest) {
+TEST_P(FaceHidlTest, CancelTest) {
     Return<Status> ret = mService->cancel();
     // check that we were able to make an IPC request successfully
     ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -347,7 +332,7 @@
     EXPECT_EQ(FaceError::CANCELED, res.args->error);
 }
 
-TEST_F(FaceHidlTest, OnLockoutChangedTest) {
+TEST_P(FaceHidlTest, OnLockoutChangedTest) {
     // Update active user and ensure onLockoutChanged was called.
     Return<Status> ret = mService->setActiveUser(kUserId + 1, kFacedataDir);
     ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -359,11 +344,7 @@
 
 }  // anonymous namespace
 
-int main(int argc, char** argv) {
-    ::testing::AddGlobalTestEnvironment(FaceHidlEnvironment::Instance());
-    ::testing::InitGoogleTest(&argc, argv);
-    FaceHidlEnvironment::Instance()->init(&argc, argv);
-    int status = RUN_ALL_TESTS();
-    LOG(INFO) << "Test result = " << status;
-    return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+        PerInstance, FaceHidlTest,
+        testing::ValuesIn(android::hardware::getAllHalInstanceNames(IBiometricsFace::descriptor)),
+        android::hardware::PrintInstanceNameToString);
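
The same Environment-to-parameterized-test conversion recurs in the gatekeeper test below. The generic recipe, with IFoo as a placeholder HAL interface:

    #include <gtest/gtest.h>
    #include <hidl/GtestPrinter.h>
    #include <hidl/ServiceManagement.h>

    class FooHidlTest : public ::testing::TestWithParam<std::string> {
      protected:
        void SetUp() override {
            // GetParam() is the HAL instance name, e.g. "default".
            mService = IFoo::getService(GetParam());
            ASSERT_NE(mService, nullptr);
        }
        ::android::sp<IFoo> mService;
    };

    // Each former TEST_F becomes TEST_P and runs once per registered instance.
    TEST_P(FooHidlTest, Smoke) { ASSERT_NE(mService, nullptr); }

    INSTANTIATE_TEST_SUITE_P(
            PerInstance, FooHidlTest,
            testing::ValuesIn(android::hardware::getAllHalInstanceNames(IFoo::descriptor)),
            android::hardware::PrintInstanceNameToString);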
diff --git a/cas/1.2/Android.bp b/cas/1.2/Android.bp
index af98b2e..fbb38b0 100644
--- a/cas/1.2/Android.bp
+++ b/cas/1.2/Android.bp
@@ -7,10 +7,10 @@
         enabled: true,
     },
     srcs: [
+        "types.hal",
         "ICas.hal",
         "ICasListener.hal",
         "IMediaCasService.hal",
-        "types.hal",
     ],
     interfaces: [
         "android.hardware.cas@1.0",
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index a500ca0..ed88274 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -256,7 +256,7 @@
     <hal format="hidl" optional="false">
         <name>android.hardware.keymaster</name>
         <version>3.0</version>
-        <version>4.0</version>
+        <version>4.0-1</version>
         <interface>
             <name>IKeymasterDevice</name>
             <instance>default</instance>
@@ -264,7 +264,7 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.keymaster</name>
-        <version>4.0</version>
+        <version>4.0-1</version>
         <interface>
             <name>IKeymasterDevice</name>
             <instance>strongbox</instance>
@@ -367,10 +367,7 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.radio.config</name>
-        <!--
-        See compatibility_matrix.4.xml on versioning of radio config HAL.
-        -->
-        <version>1.1</version>
+        <version>1.3</version>
         <interface>
             <name>IRadioConfig</name>
             <instance>default</instance>
diff --git a/current.txt b/current.txt
index b811a92..b6ef9dc 100644
--- a/current.txt
+++ b/current.txt
@@ -578,7 +578,7 @@
 9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
 fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
 40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-71c0f7127335e5b74d1615d5e7f129831b43ffbae5318ad0924d7d8d8910a859 android.hardware.neuralnetworks@1.2::types
+72de91c3feba4b19c159cd1c413cbea596b78240caa43e31194e20e6f5b05c49 android.hardware.neuralnetworks@1.2::types
 a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
 1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
 fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -591,11 +591,18 @@
 ce8dbe76eb9ee94b46ef98f725be992e760a5751073d4f4912484026541371f3 android.hardware.health@2.1::IHealth
 26f04510a0b57aba5167c5c0a7c2f077c2acbb98b81902a072517829fd9fd67f android.hardware.health@2.1::IHealthInfoCallback
 db47f4ceceb1f06c656f39caa70c557b0f8471ef59fd58611bea667ffca20101 android.hardware.health@2.1::types
+c228aaa27f66c48e147159a4f4996c5273191fece1b08de31bd171c61334855e android.hardware.keymaster@4.1::IKeymasterDevice
+adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
+7a04ea5595ed418ca3e91c28b8bd7353dd988be9be7b0c8c9e64fb4b77bd4523 android.hardware.keymaster@4.1::types
 9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
 4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
 94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
+554baa3b317e077b850afcbaac99daeef56861b1786540e56275a4fcad1f43e3 android.hardware.neuralnetworks@1.3::types
 274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
 c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
 a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication
-260ce05806d753d728f844d405e832179ed7d9b65986ec18fef3d21cf7285587 android.hardware.radio@1.5::IRadioResponse
\ No newline at end of file
+260ce05806d753d728f844d405e832179ed7d9b65986ec18fef3d21cf7285587 android.hardware.radio@1.5::IRadioResponse
+55f0a15642869ec98a55ea0a5ac049d3e1a6245ff7750deb6bcb7182057eee83 android.hardware.radio.config@1.3::types
+b27ab0cd40b0b078cdcd024bfe1061c4c4c065f3519eeb9347fa359a3268a5ae android.hardware.radio.config@1.3::IRadioConfig
+742360c775313438b0f82256eac62fb5bbc76a6ae6f388573f3aa142fb2c1eea android.hardware.radio.config@1.3::IRadioConfigIndication
+7683fed9d253956071f18b152e6be657719536f98d9b534433d5e411bcde5061 android.hardware.radio.config@1.3::IRadioConfigResponse
diff --git a/drm/1.0/Android.bp b/drm/1.0/Android.bp
index a950c57..9049af2 100644
--- a/drm/1.0/Android.bp
+++ b/drm/1.0/Android.bp
@@ -17,5 +17,5 @@
     interfaces: [
         "android.hidl.base@1.0",
     ],
-    gen_java: false,
+    gen_java: true,
 }
diff --git a/gatekeeper/1.0/vts/functional/Android.bp b/gatekeeper/1.0/vts/functional/Android.bp
index f55e5d2..a115285 100644
--- a/gatekeeper/1.0/vts/functional/Android.bp
+++ b/gatekeeper/1.0/vts/functional/Android.bp
@@ -19,5 +19,5 @@
     defaults: ["VtsHalTargetTestDefaults"],
     srcs: ["VtsHalGatekeeperV1_0TargetTest.cpp"],
     static_libs: ["android.hardware.gatekeeper@1.0"],
-    test_suites: ["general-tests"],
+    test_suites: ["general-tests", "vts-core"],
 }
diff --git a/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp b/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp
index 715e9fc..afc737c 100644
--- a/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp
+++ b/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp
@@ -24,7 +24,10 @@
 #include <inttypes.h>
 #include <unistd.h>
 
+#include <gtest/gtest.h>
 #include <hardware/hw_auth_token.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
 
 #include <android/log.h>
 #include <android/hardware/gatekeeper/1.0/IGatekeeper.h>
@@ -32,9 +35,6 @@
 
 #include <log/log.h>
 
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::gatekeeper::V1_0::IGatekeeper;
@@ -78,22 +78,8 @@
   return auth_token;
 }
 
-// Test environment for Gatekeeper HIDL HAL.
-class GatekeeperHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
-  // get the test environment singleton
-  static GatekeeperHidlEnvironment* Instance() {
-    static GatekeeperHidlEnvironment* instance = new GatekeeperHidlEnvironment;
-    return instance;
-  }
-
-  virtual void registerTestServices() override { registerTestService<IGatekeeper>(); }
- private:
-  GatekeeperHidlEnvironment() {}
-};
-
 // The main test class for Gatekeeper HIDL HAL.
-class GatekeeperHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class GatekeeperHidlTest : public ::testing::TestWithParam<std::string> {
  protected:
   void setUid(uint32_t uid) { uid_ = uid; }
 
@@ -204,8 +190,7 @@
   GatekeeperHidlTest() : uid_(0) {}
   virtual void SetUp() override {
     GatekeeperResponse rsp;
-    gatekeeper_ = ::testing::VtsHalHidlTargetTestBase::getService<IGatekeeper>(
-        GatekeeperHidlEnvironment::Instance()->getServiceName<IGatekeeper>());
+    gatekeeper_ = IGatekeeper::getService(GetParam());
     ASSERT_NE(nullptr, gatekeeper_.get());
     doDeleteAllUsers(rsp);
   }
@@ -219,7 +204,7 @@
 /**
  * Ensure we can enroll a new password
  */
-TEST_F(GatekeeperHidlTest, EnrollSuccess) {
+TEST_P(GatekeeperHidlTest, EnrollSuccess) {
   hidl_vec<uint8_t> password;
   GatekeeperResponse rsp;
   ALOGI("Testing Enroll (expected success)");
@@ -231,7 +216,7 @@
 /**
  * Ensure we cannot enroll an empty password
  */
-TEST_F(GatekeeperHidlTest, EnrollNoPassword) {
+TEST_P(GatekeeperHidlTest, EnrollNoPassword) {
   hidl_vec<uint8_t> password;
   GatekeeperResponse rsp;
   ALOGI("Testing Enroll (expected failure)");
@@ -242,7 +227,7 @@
 /**
  * Ensure we can successfully verify a previously enrolled password
  */
-TEST_F(GatekeeperHidlTest, VerifySuccess) {
+TEST_P(GatekeeperHidlTest, VerifySuccess) {
   GatekeeperResponse enrollRsp;
   GatekeeperResponse verifyRsp;
   hidl_vec<uint8_t> password;
@@ -258,7 +243,7 @@
  * Ensure we can securely update a password (keeping the same
  * secure user_id) if we prove we know the old password
  */
-TEST_F(GatekeeperHidlTest, TrustedReenroll) {
+TEST_P(GatekeeperHidlTest, TrustedReenroll) {
   GatekeeperResponse enrollRsp;
   GatekeeperRequest reenrollReq;
   GatekeeperResponse reenrollRsp;
@@ -297,7 +282,7 @@
  * Ensure we can update a password (and get a new
  * secure user_id) if we don't know the old password
  */
-TEST_F(GatekeeperHidlTest, UntrustedReenroll) {
+TEST_P(GatekeeperHidlTest, UntrustedReenroll) {
   GatekeeperResponse enrollRsp;
   GatekeeperResponse reenrollRsp;
   GatekeeperResponse verifyRsp;
@@ -327,7 +312,7 @@
 /**
  * Ensure we don't get a successful verify with invalid data
  */
-TEST_F(GatekeeperHidlTest, VerifyNoData) {
+TEST_P(GatekeeperHidlTest, VerifyNoData) {
   hidl_vec<uint8_t> password;
   hidl_vec<uint8_t> passwordHandle;
   GatekeeperResponse verifyRsp;
@@ -341,7 +326,7 @@
 /**
  * Ensure we cannot verify a password after we enrolled it and then deleted the user
  */
-TEST_F(GatekeeperHidlTest, DeleteUserTest) {
+TEST_P(GatekeeperHidlTest, DeleteUserTest) {
   hidl_vec<uint8_t> password;
   GatekeeperResponse enrollRsp;
   GatekeeperResponse verifyRsp;
@@ -368,7 +353,7 @@
 /**
  * Ensure we cannot delete a user that does not exist
  */
-TEST_F(GatekeeperHidlTest, DeleteInvalidUserTest) {
+TEST_P(GatekeeperHidlTest, DeleteInvalidUserTest) {
   hidl_vec<uint8_t> password;
   GatekeeperResponse enrollRsp;
   GatekeeperResponse verifyRsp;
@@ -400,7 +385,7 @@
  * Ensure we cannot verify passwords after we enrolled them and then deleted
  * all users
  */
-TEST_F(GatekeeperHidlTest, DeleteAllUsersTest) {
+TEST_P(GatekeeperHidlTest, DeleteAllUsersTest) {
   struct UserData {
     uint32_t userId;
     hidl_vec<uint8_t> password;
@@ -448,11 +433,7 @@
   ALOGI("Testing deleteAllUsers done: rsp=%" PRIi32, delAllRsp.code);
 }
 
-int main(int argc, char **argv) {
-  ::testing::AddGlobalTestEnvironment(GatekeeperHidlEnvironment::Instance());
-  ::testing::InitGoogleTest(&argc, argv);
-  GatekeeperHidlEnvironment::Instance()->init(&argc, argv);
-  int status = RUN_ALL_TESTS();
-  ALOGI("Test result = %d", status);
-  return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+        PerInstance, GatekeeperHidlTest,
+        testing::ValuesIn(android::hardware::getAllHalInstanceNames(IGatekeeper::descriptor)),
+        android::hardware::PrintInstanceNameToString);
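
The per-instance parameterization above is the standard vts-core idiom, and the same migration recipe applies to any VTS test still on the removed test-environment pattern. A minimal sketch, assuming a hypothetical IFoo interface in place of IGatekeeper:

    // Hedged sketch of the parameterized per-instance VTS pattern; IFoo and its
    // header path are hypothetical stand-ins.
    #include <gtest/gtest.h>
    #include <hidl/GtestPrinter.h>
    #include <hidl/ServiceManagement.h>
    #include <android/hardware/foo/1.0/IFoo.h>  // hypothetical interface header

    using ::android::sp;
    using ::android::hardware::foo::V1_0::IFoo;  // hypothetical

    class FooHidlTest : public ::testing::TestWithParam<std::string> {
      protected:
        void SetUp() override {
            // GetParam() is the instance name injected by INSTANTIATE_TEST_SUITE_P below.
            foo_ = IFoo::getService(GetParam());
            ASSERT_NE(nullptr, foo_.get());
        }
        sp<IFoo> foo_;
    };

    TEST_P(FooHidlTest, Smoke) { /* exercise foo_ */ }

    // Runs every TEST_P above once per registered instance of the service.
    INSTANTIATE_TEST_SUITE_P(
            PerInstance, FooHidlTest,
            testing::ValuesIn(android::hardware::getAllHalInstanceNames(IFoo::descriptor)),
            android::hardware::PrintInstanceNameToString);

Note that the hand-written main() disappears entirely: once the custom test environment is gone, the stock gtest main linked through the VtsHalTargetTestDefaults build defaults is sufficient.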
diff --git a/health/2.0/default/healthd_common_adapter.cpp b/health/2.0/default/healthd_common_adapter.cpp
index 8fc689d..0b5d869 100644
--- a/health/2.0/default/healthd_common_adapter.cpp
+++ b/health/2.0/default/healthd_common_adapter.cpp
@@ -49,11 +49,14 @@
 static std::unique_ptr<HealthLoopAdapter> health_loop;
 
 int healthd_register_event(int fd, void (*handler)(uint32_t), EventWakeup wakeup) {
+    if (!health_loop) return -1;
+
     auto wrapped_handler = [handler](auto*, uint32_t epevents) { handler(epevents); };
     return health_loop->RegisterEvent(fd, wrapped_handler, wakeup);
 }
 
 void healthd_battery_update_internal(bool charger_online) {
+    if (!health_loop) return;
     health_loop->AdjustWakealarmPeriods(charger_online);
 }
 
diff --git a/keymaster/4.1/Android.bp b/keymaster/4.1/Android.bp
new file mode 100644
index 0000000..3b505d8
--- /dev/null
+++ b/keymaster/4.1/Android.bp
@@ -0,0 +1,20 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.keymaster@4.1",
+    root: "android.hardware",
+    vndk: {
+        enabled: true,
+    },
+    srcs: [
+        "types.hal",
+        "IKeymasterDevice.hal",
+        "IOperation.hal",
+    ],
+    interfaces: [
+        "android.hardware.keymaster@3.0",
+        "android.hardware.keymaster@4.0",
+        "android.hidl.base@1.0",
+    ],
+    gen_java: false,
+}
diff --git a/keymaster/4.1/IKeymasterDevice.hal b/keymaster/4.1/IKeymasterDevice.hal
new file mode 100644
index 0000000..64d2c9f
--- /dev/null
+++ b/keymaster/4.1/IKeymasterDevice.hal
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.keymaster@4.1;
+
+import @4.0::ErrorCode;
+import @4.0::HardwareAuthToken;
+import @4.0::IKeymasterDevice;
+import @4.0::KeyParameter;
+import @4.0::KeyPurpose;
+import @4.0::OperationHandle;
+import IOperation;
+
+/**
+ * @4.1::IKeymasterDevice is a minor extension to @4.0::IKeymasterDevice.  It adds support for
+ *
+ * - Partial hardware enforcement of UNLOCKED_DEVICE_REQUIRED keys;
+ * - Device-unique attestation;
+ * - Early boot only keys;
+ * - Better cleanup of operations when clients die without completing or aborting them.
+ */
+interface IKeymasterDevice extends @4.0::IKeymasterDevice {
+    /**
+     * Called by client to notify the IKeymasterDevice that the device is now locked, and keys with
+     * the UNLOCKED_DEVICE_REQUIRED tag should no longer be usable.  When this function is called,
+     * the IKeymasterDevice should note the current timestamp, and attempts to use
+     * UNLOCKED_DEVICE_REQUIRED keys must be rejected with ErrorCode::DEVICE_LOCKED until an
+     * authentication token with a later timestamp is presented.  If the passwordOnly argument
+     * is set to true, the sufficiently-recent authentication token must indicate that the user
+     * authenticated with a password, not a biometric.
+     *
+     * @param passwordOnly specifies whether the device must be unlocked with a password, rather
+     * than a biometric, before UNLOCKED_DEVICE_REQUIRED keys can be used.
+     */
+    deviceLocked(bool passwordOnly) generates (ErrorCode error);
+
+    /**
+     * Called by client to notify the IKeymasterDevice that the device has left the early boot
+     * state, and that keys with the EARLY_BOOT_ONLY tag may no longer be used.  All attempts to use
+     * an EARLY_BOOT_ONLY key after this method is called must fail with ErrorCode::INVALID_KEY_BLOB.
+     */
+    earlyBootEnded() generates (ErrorCode error);
+
+    /**
+     * Begins a cryptographic operation.  beginOp() is a variation on begin() with identical
+     * functionality, except that it returns an IOperation object rather than an OperationHandle.
+     * An IKeymasterDevice HAL service must call linkToDeath() on the Operation before
+     * returning it, and the provided hidl_death_recipient, if called, must abort() the operation.
+     * This is to ensure that in the event a client crashes while an operation is in progress, the
+     * operation slot is freed and available for use by other clients.
+     *
+     * @4.1::IKeymasterDevices must implement both beginOp() and begin().
+     */
+    beginOp(KeyPurpose purpose, vec<uint8_t> keyBlob, vec<KeyParameter> inParams,
+        HardwareAuthToken authToken)
+        generates (ErrorCode error, vec<KeyParameter> outParams, IOperation operation);
+};
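
To make the linkToDeath() contract above concrete, here is a hedged service-side sketch. Only linkToDeath(), hidl_death_recipient, and @4.0::IKeymasterDevice::abort() come from real APIs; the class and member names are illustrative, not part of this change:

    // Illustrative death recipient that frees the operation slot of a crashed client.
    #include <android/hardware/keymaster/4.0/IKeymasterDevice.h>
    #include <hidl/HidlSupport.h>

    using ::android::sp;
    using ::android::wp;
    using ::android::hardware::hidl_death_recipient;
    using ::android::hidl::base::V1_0::IBase;
    using ::android::hardware::keymaster::V4_0::OperationHandle;
    using KmDevice = ::android::hardware::keymaster::V4_0::IKeymasterDevice;

    class OperationSlotReaper : public hidl_death_recipient {  // hypothetical name
      public:
        OperationSlotReaper(sp<KmDevice> dev, OperationHandle handle)
            : dev_(std::move(dev)), handle_(handle) {}

        void serviceDied(uint64_t /*cookie*/, const wp<IBase>& /*who*/) override {
            dev_->abort(handle_);  // release the slot held by the dead client
        }

      private:
        sp<KmDevice> dev_;
        OperationHandle handle_;
    };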
diff --git a/keymaster/4.1/IOperation.hal b/keymaster/4.1/IOperation.hal
new file mode 100644
index 0000000..7103e9e
--- /dev/null
+++ b/keymaster/4.1/IOperation.hal
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.keymaster@4.1;
+
+import @4.0::ErrorCode;
+import @4.0::OperationHandle;
+
+/**
+ * IOperation represents an in-progress IKeymasterDevice operation.  It is returned by
+ * IKeymasterDevice.beginOp().
+ */
+interface IOperation {
+    /**
+     * Returns the operation handle to be used as an authentication challenge.
+     */
+    getOperationChallenge() generates (ErrorCode error, OperationHandle operation);
+};
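
Client-side, the two interfaces compose as in this hedged sketch. The callback shapes follow mechanically from the generates() clauses above; the key blob and parameters are placeholders, and the usings/includes from the generated 4.1 headers are assumed:

    // Hedged client sketch: begin an operation via beginOp(), then fetch the
    // challenge from the returned IOperation. Error handling is elided.
    sp<IKeymasterDevice> km = IKeymasterDevice::getService();
    hidl_vec<uint8_t> keyBlob = {};        // placeholder: a previously generated key
    hidl_vec<KeyParameter> inParams = {};  // placeholder: operation parameters

    sp<IOperation> op;
    km->beginOp(KeyPurpose::SIGN, keyBlob, inParams, HardwareAuthToken{},
                [&](ErrorCode error, const hidl_vec<KeyParameter>& /*outParams*/,
                    const sp<IOperation>& operation) {
                    if (error == ErrorCode::OK) op = operation;
                });

    OperationHandle challenge = 0;
    op->getOperationChallenge([&](ErrorCode error, OperationHandle handle) {
        if (error == ErrorCode::OK) challenge = handle;
    });
    // 'challenge' seeds HardwareAuthToken::challenge for auth-bound update/finish calls.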
diff --git a/keymaster/4.1/default/Android.bp b/keymaster/4.1/default/Android.bp
new file mode 100644
index 0000000..b06878b
--- /dev/null
+++ b/keymaster/4.1/default/Android.bp
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_binary {
+    name: "android.hardware.keymaster@4.1-service",
+    defaults: ["hidl_defaults"],
+    relative_install_path: "hw",
+    vendor: true,
+    init_rc: ["android.hardware.keymaster@4.1-service.rc"],
+    srcs: ["service.cpp"],
+
+    shared_libs: [
+        "android.hardware.keymaster@4.0",
+        "android.hardware.keymaster@4.1",
+        "libbase",
+        "libcutils",
+        "libhardware",
+        "libhidlbase",
+        "libkeymaster4",
+        "libkeymaster41",
+        "liblog",
+        "libutils",
+    ],
+
+}
diff --git a/keymaster/4.1/default/OWNERS b/keymaster/4.1/default/OWNERS
new file mode 100644
index 0000000..335660d
--- /dev/null
+++ b/keymaster/4.1/default/OWNERS
@@ -0,0 +1,2 @@
+jdanis@google.com
+swillden@google.com
diff --git a/keymaster/4.1/default/android.hardware.keymaster@4.1-service.rc b/keymaster/4.1/default/android.hardware.keymaster@4.1-service.rc
new file mode 100644
index 0000000..740b3c2
--- /dev/null
+++ b/keymaster/4.1/default/android.hardware.keymaster@4.1-service.rc
@@ -0,0 +1,6 @@
+service vendor.keymaster-4-1 /vendor/bin/hw/android.hardware.keymaster@4.1-service
+    interface android.hardware.keymaster@4.0::IKeymasterDevice default
+    interface android.hardware.keymaster@4.1::IKeymasterDevice default
+    class early_hal
+    user system
+    group system drmrpc
diff --git a/keymaster/4.1/default/service.cpp b/keymaster/4.1/default/service.cpp
new file mode 100644
index 0000000..d79a291
--- /dev/null
+++ b/keymaster/4.1/default/service.cpp
@@ -0,0 +1,35 @@
+/*
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <android-base/logging.h>
+#include <android/hardware/keymaster/4.1/IKeymasterDevice.h>
+#include <hidl/HidlTransportSupport.h>
+
+#include <AndroidKeymaster41Device.h>
+
+using android::hardware::keymaster::V4_0::SecurityLevel;
+
+int main() {
+    ::android::hardware::configureRpcThreadpool(1, true /* willJoinThreadpool */);
+    auto keymaster = ::keymaster::V4_1::CreateKeymasterDevice(SecurityLevel::SOFTWARE);
+    auto status = keymaster->registerAsService();
+    if (status != android::OK) {
+        LOG(FATAL) << "Could not register service for Keymaster 4.1 (" << status << ")";
+    }
+
+    android::hardware::joinRpcThreadpool();
+    return -1;  // Should never get here.
+}
diff --git a/keymaster/4.1/support/Android.bp b/keymaster/4.1/support/Android.bp
new file mode 100644
index 0000000..34b6108
--- /dev/null
+++ b/keymaster/4.1/support/Android.bp
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library {
+    name: "libkeymaster4_1support",
+    vendor_available: true,
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+    export_include_dirs: ["include"],
+    shared_libs: [
+        "android.hardware.keymaster@3.0",
+        "android.hardware.keymaster@4.0",
+        "android.hardware.keymaster@4.1",
+        "libkeymaster4support",
+    ]
+}
diff --git a/keymaster/4.1/support/include/keymasterV4_1/authorization_set.h b/keymaster/4.1/support/include/keymasterV4_1/authorization_set.h
new file mode 100644
index 0000000..afc0eaf
--- /dev/null
+++ b/keymaster/4.1/support/include/keymasterV4_1/authorization_set.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_AUTHORIZATION_SET_H_
+#define HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_AUTHORIZATION_SET_H_
+
+#include <keymasterV4_0/authorization_set.h>
+
+#include <keymasterV4_1/keymaster_tags.h>
+
+namespace android::hardware::keymaster::V4_1 {
+
+using V4_0::AuthorizationSet;
+using V4_0::AuthorizationSetBuilder;
+using V4_0::KeyParameter;
+
+}  // namespace android::hardware::keymaster::V4_1
+
+#endif  // HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_AUTHORIZATION_SET_H_
diff --git a/keymaster/4.1/support/include/keymasterV4_1/keymaster_tags.h b/keymaster/4.1/support/include/keymasterV4_1/keymaster_tags.h
new file mode 100644
index 0000000..6ffe8e1
--- /dev/null
+++ b/keymaster/4.1/support/include/keymasterV4_1/keymaster_tags.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_KEYMASTER_TAGS_H_
+#define HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_KEYMASTER_TAGS_H_
+
+#include <android/hardware/keymaster/4.1/types.h>
+
+#include <keymasterV4_0/keymaster_tags.h>
+
+namespace android::hardware::keymaster::V4_1 {
+
+using V4_0::BlockMode;
+using V4_0::Digest;
+using V4_0::EcCurve;
+using V4_0::ErrorCode;
+using V4_0::HardwareAuthToken;
+using V4_0::KeyParameter;
+using V4_0::PaddingMode;
+using V4_0::TagType;
+using V4_0::VerificationToken;
+
+using V4_0::TypedTag;
+
+using V4_0::TAG_ACTIVE_DATETIME;
+using V4_0::TAG_ALGORITHM;
+using V4_0::TAG_ALLOW_WHILE_ON_BODY;
+using V4_0::TAG_APPLICATION_DATA;
+using V4_0::TAG_APPLICATION_ID;
+using V4_0::TAG_ASSOCIATED_DATA;
+using V4_0::TAG_ATTESTATION_APPLICATION_ID;
+using V4_0::TAG_ATTESTATION_CHALLENGE;
+using V4_0::TAG_AUTH_TIMEOUT;
+using V4_0::TAG_BLOB_USAGE_REQUIREMENTS;
+using V4_0::TAG_BLOCK_MODE;
+using V4_0::TAG_BOOT_PATCHLEVEL;
+using V4_0::TAG_BOOTLOADER_ONLY;
+using V4_0::TAG_CALLER_NONCE;
+using V4_0::TAG_CONFIRMATION_TOKEN;
+using V4_0::TAG_CREATION_DATETIME;
+using V4_0::TAG_DIGEST;
+using V4_0::TAG_EC_CURVE;
+using V4_0::TAG_HARDWARE_TYPE;
+using V4_0::TAG_INCLUDE_UNIQUE_ID;
+using V4_0::TAG_INVALID;
+using V4_0::TAG_KEY_SIZE;
+using V4_0::TAG_MAC_LENGTH;
+using V4_0::TAG_MAX_USES_PER_BOOT;
+using V4_0::TAG_MIN_MAC_LENGTH;
+using V4_0::TAG_MIN_SECONDS_BETWEEN_OPS;
+using V4_0::TAG_NO_AUTH_REQUIRED;
+using V4_0::TAG_NONCE;
+using V4_0::TAG_ORIGIN;
+using V4_0::TAG_ORIGINATION_EXPIRE_DATETIME;
+using V4_0::TAG_OS_PATCHLEVEL;
+using V4_0::TAG_OS_VERSION;
+using V4_0::TAG_PADDING;
+using V4_0::TAG_PURPOSE;
+using V4_0::TAG_RESET_SINCE_ID_ROTATION;
+using V4_0::TAG_ROLLBACK_RESISTANCE;
+using V4_0::TAG_ROOT_OF_TRUST;
+using V4_0::TAG_RSA_PUBLIC_EXPONENT;
+using V4_0::TAG_TRUSTED_CONFIRMATION_REQUIRED;
+using V4_0::TAG_TRUSTED_USER_PRESENCE_REQUIRED;
+using V4_0::TAG_UNIQUE_ID;
+using V4_0::TAG_UNLOCKED_DEVICE_REQUIRED;
+using V4_0::TAG_USAGE_EXPIRE_DATETIME;
+using V4_0::TAG_USER_AUTH_TYPE;
+using V4_0::TAG_USER_ID;
+using V4_0::TAG_USER_SECURE_ID;
+using V4_0::TAG_VENDOR_PATCHLEVEL;
+
+#define DECLARE_KM_4_1_TYPED_TAG(name)                                                   \
+    typedef typename V4_0::Tag2TypedTag<(static_cast<V4_0::Tag>(V4_1::Tag::name))>::type \
+            TAG_##name##_t;                                                              \
+    static TAG_##name##_t TAG_##name;
+
+DECLARE_KM_4_1_TYPED_TAG(EARLY_BOOT_ONLY);
+DECLARE_KM_4_1_TYPED_TAG(DEVICE_UNIQUE_ATTESTATION);
+
+}  // namespace android::hardware::keymaster::V4_1
+
+#endif  // HARDWARE_INTERFACES_KEYMASTER_V4_1_SUPPORT_INCLUDE_KEYMASTER_TAGS_H_
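
Since DECLARE_KM_4_1_TYPED_TAG produces the same TypedTag machinery as the re-exported 4.0 tags, the new tags compose directly with the 4.0 AuthorizationSetBuilder fluent API. A minimal sketch, with illustrative key parameters:

    // Sketch: mixing a new 4.1 tag with re-exported 4.0 tags.
    #include <keymasterV4_1/authorization_set.h>
    #include <keymasterV4_1/keymaster_tags.h>

    using namespace ::android::hardware::keymaster::V4_1;

    auto earlyBootKeyParams = AuthorizationSetBuilder()
            .Authorization(TAG_NO_AUTH_REQUIRED)   // 4.0 tag, re-exported above
            .Authorization(TAG_KEY_SIZE, 256)      // 4.0 UINT tag
            .Authorization(TAG_EARLY_BOOT_ONLY);   // new 4.1 BOOL tag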
diff --git a/keymaster/4.1/types.hal b/keymaster/4.1/types.hal
new file mode 100644
index 0000000..bdf1731
--- /dev/null
+++ b/keymaster/4.1/types.hal
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.keymaster@4.1;
+
+import @4.0::ErrorCode;
+import @4.0::Tag;
+import @4.0::TagType;
+
+enum Tag : @4.0::Tag {
+    /**
+     * Keys tagged with EARLY_BOOT_ONLY may only be created or used during early boot, until
+     * IKeymasterDevice::earlyBootEnded() is called.
+     */
+    EARLY_BOOT_ONLY = TagType:BOOL | 305,
+    /**
+     * DEVICE_UNIQUE_ATTESTATION is an argument to IKeymasterDevice::attestKey().  It indicates that
+     * attestation using a device-unique key is requested, rather than a batch key.  Only
+     * SecurityLevel::STRONGBOX IKeymasterDevices may support device-unique attestations.
+     * SecurityLevel::TRUSTED_ENVIRONMENT IKeymasterDevices must return ErrorCode::INVALID_ARGUMENT
+     * if they receive DEVICE_UNIQUE_ATTESTATION.  SecurityLevel::STRONGBOX IKeymasterDevices need
+     * not support DEVICE_UNIQUE_ATTESTATION, and return ErrorCode::CANNOT_ATTEST_IDS if they do not
+     * support it.
+     *
+     * IKeymasterDevice implementations that support device-unique attestation MUST add the
+     * DEVICE_UNIQUE_ATTESTATION tag to device-unique attestations.
+     */
+    DEVICE_UNIQUE_ATTESTATION = TagType:BOOL | 720,
+};
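
For a sense of how a client exercises DEVICE_UNIQUE_ATTESTATION through the unchanged @4.0 attestKey() method, a hedged sketch (km and keyBlob as in the beginOp sketch earlier; the hidl_data() accessor is assumed from the 4.0 support library; challenge bytes are placeholders):

    // Sketch: requesting a device-unique attestation. Per the comment above, a
    // TRUSTED_ENVIRONMENT device must fail with INVALID_ARGUMENT, and a STRONGBOX
    // device either attests or returns CANNOT_ATTEST_IDS.
    hidl_vec<uint8_t> challenge = {0x01, 0x02, 0x03};  // placeholder challenge
    auto attestParams = AuthorizationSetBuilder()
            .Authorization(TAG_DEVICE_UNIQUE_ATTESTATION)
            .Authorization(TAG_ATTESTATION_CHALLENGE, challenge.data(), challenge.size());

    km->attestKey(keyBlob, attestParams.hidl_data(),
                  [&](ErrorCode error, const hidl_vec<hidl_vec<uint8_t>>& certChain) {
                      // On success, certChain[0] must carry the DEVICE_UNIQUE_ATTESTATION
                      // tag in its attestation extension.
                  });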
diff --git a/keymaster/4.1/vts/OWNERS b/keymaster/4.1/vts/OWNERS
new file mode 100644
index 0000000..335660d
--- /dev/null
+++ b/keymaster/4.1/vts/OWNERS
@@ -0,0 +1,2 @@
+jdanis@google.com
+swillden@google.com
diff --git a/keymaster/4.1/vts/functional/Android.bp b/keymaster/4.1/vts/functional/Android.bp
new file mode 100644
index 0000000..f5a0c9c
--- /dev/null
+++ b/keymaster/4.1/vts/functional/Android.bp
@@ -0,0 +1,30 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "VtsHalKeymasterV4_1TargetTest",
+    defaults: ["VtsHalTargetTestDefaults"],
+    srcs: [
+        "EarlyBootKeyTest.cpp",
+    ],
+    static_libs: [
+        "android.hardware.keymaster@4.0",
+        "android.hardware.keymaster@4.1",
+        "libkeymaster4support",
+        "libkeymaster4_1support",
+    ],
+    test_suites: ["vts-core"],
+}
diff --git a/keymaster/4.1/vts/functional/EarlyBootKeyTest.cpp b/keymaster/4.1/vts/functional/EarlyBootKeyTest.cpp
new file mode 100644
index 0000000..4a19010
--- /dev/null
+++ b/keymaster/4.1/vts/functional/EarlyBootKeyTest.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android::hardware::keymaster::V4_1::test {
+
+// TODO(swillden): Put tests here.
+
+}  // namespace android::hardware::keymaster::V4_1::test
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 837ced5..ef71ea8 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -2448,15 +2448,17 @@
      *       then clipping is disabled.
      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
      *       this scalar must be of the type {@link OperandType::FLOAT32},
-     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
-     *       this scalar must be of type {@link OperandType::FLOAT16}.
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
      * * 50: The clipping threshold for the output from the
      *       projection layer, such that values are bound within
      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
      *       this scalar must be of the type {@link OperandType::FLOAT32},
-     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
-     *       this scalar must be of type {@link OperandType::FLOAT16}.
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
      * * 51: merge_outputs
      *       An {@link OperandType::BOOL} scalar specifying if the outputs
      *       from forward and backward cells should be merged.
@@ -4124,7 +4126,6 @@
      * * 0: A tensor of the same type and shape as input1 and input2.
      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
-     *
      */
     SELECT = 84,
 
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index aacb385..c1bf494 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -58,8 +58,20 @@
 using V1_1::ExecutionPreference;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
+namespace {
+
+enum class Executor { ASYNC, SYNC, BURST };
+
 enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
 
+struct TestConfig {
+    Executor executor;
+    MeasureTiming measureTiming;
+    OutputType outputType;
+};
+
+}  // namespace
+
 Model createModel(const TestModel& testModel) {
     // Model operands.
     hidl_vec<Operand> operands(testModel.operands.size());
@@ -194,31 +206,31 @@
     return android::nn::ExecutionBurstController::create(preparedModel,
                                                          std::chrono::microseconds{0});
 }
-enum class Executor { ASYNC, SYNC, BURST };
 
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                           Executor executor, MeasureTiming measure, OutputType outputType) {
+                           const TestConfig& testConfig) {
     // If output0 does not have a size larger than one byte, we cannot test with an insufficient buffer.
-    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+    if (testConfig.outputType == OutputType::INSUFFICIENT &&
+        !isOutputSizeGreaterThanOne(testModel, 0)) {
         return;
     }
 
     Request request = createRequest(testModel);
-    if (outputType == OutputType::INSUFFICIENT) {
+    if (testConfig.outputType == OutputType::INSUFFICIENT) {
         makeOutputInsufficientSize(/*outputIndex=*/0, &request);
     }
 
     ErrorStatus executionStatus;
     hidl_vec<OutputShape> outputShapes;
     Timing timing;
-    switch (executor) {
+    switch (testConfig.executor) {
         case Executor::ASYNC: {
             SCOPED_TRACE("asynchronous");
 
             // launch execution
             sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-            Return<ErrorStatus> executionLaunchStatus =
-                    ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+                    preparedModel, request, testConfig.measureTiming, executionCallback);
             ASSERT_TRUE(executionLaunchStatus.isOk());
             EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
@@ -234,8 +246,8 @@
             SCOPED_TRACE("synchronous");
 
             // execute
-            Return<ErrorStatus> executionReturnStatus =
-                    ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
             ASSERT_TRUE(executionReturnStatus.isOk());
             executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
 
@@ -258,14 +270,14 @@
             // execute burst
             int n;
             std::tie(n, outputShapes, timing, std::ignore) =
-                    controller->compute(request, measure, keys);
+                    controller->compute(request, testConfig.measureTiming, keys);
             executionStatus = nn::convertResultCodeToErrorStatus(n);
 
             break;
         }
     }
 
-    if (outputType != OutputType::FULLY_SPECIFIED &&
+    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
         executionStatus == ErrorStatus::GENERAL_FAILURE) {
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                      "execute model that it does not support.";
@@ -274,7 +286,7 @@
                   << std::endl;
         GTEST_SKIP();
     }
-    if (measure == MeasureTiming::NO) {
+    if (testConfig.measureTiming == MeasureTiming::NO) {
         EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
         EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
     } else {
@@ -283,7 +295,7 @@
         }
     }
 
-    switch (outputType) {
+    switch (testConfig.outputType) {
         case OutputType::FULLY_SPECIFIED:
             // If the model output operands are fully specified, outputShapes must be
             // either empty, or have the same number of elements as the number of outputs.
@@ -321,44 +333,29 @@
 
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                            bool testDynamicOutputShape) {
+    // Use std::vector rather than std::initializer_list: assigning a braced list
+    // to an initializer_list would leave it pointing at a temporary backing array
+    // that is destroyed at the end of the assignment expression.
+    std::vector<OutputType> outputTypesList;
+    std::vector<MeasureTiming> measureTimingList;
+    std::vector<Executor> executorList;
+
     if (testDynamicOutputShape) {
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
-                              OutputType::INSUFFICIENT);
+        outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+        measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+        executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
     } else {
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
-                              OutputType::FULLY_SPECIFIED);
+        outputTypesList = {OutputType::FULLY_SPECIFIED};
+        measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+        executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+    }
+
+    for (const OutputType outputType : outputTypesList) {
+        for (const MeasureTiming measureTiming : measureTimingList) {
+            for (const Executor executor : executorList) {
+                const TestConfig testConfig = {.executor = executor,
+                                               .measureTiming = measureTiming,
+                                               .outputType = outputType};
+                EvaluatePreparedModel(preparedModel, testModel, testConfig);
+            }
+        }
     }
 }
 
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 86ab287..f959e45 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -73,6 +73,4503 @@
     BASE_MAX        = 0xFFFF,
 };
 
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+    /**
+     * Adds two tensors, element-wise.
+     *
+     * Takes two input tensors of identical {@link OperandType} and compatible
+     * dimensions. The output is the sum of both input tensors, optionally
+     * modified by an activation function.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its
+     * way forward.
+     *
+     * Example:
+     *
+     *     input1.dimension = {4, 1, 2}
+     *     input2.dimension = {5, 4, 3, 1}
+     *     output.dimension = {5, 4, 3, 2}
+     *
+     * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of the output
+     * dimension is zero if either corresponding input dimension is zero.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+     *      as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from input0's scale and zeroPoint.
+     * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     *
+     * Outputs:
+     * * 0: The sum, a tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    ADD = @1.2::OperationType:ADD,
+
+    /**
+     * Performs a 2-D average pooling operation.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, i, j, channel] =
+     *         sum_{di, dj}(
+     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+     *         ) / sum(1)
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, with the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 10: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+     *       Set to true to specify NCHW data layout for input0 and output0.
+     *       Available since HAL version 1.2.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, which has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 7: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    AVERAGE_POOL_2D = @1.2::OperationType:AVERAGE_POOL_2D,
+
+    /**
+     * Concatenates the input tensors along the given dimension.
+     *
+     * The input tensors must have identical {@link OperandType} and the same
+     * dimensions except the dimension along the concatenation axis.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *   (full support since HAL version 1.2, see the input section)
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0 ~ n-1: The list of n input tensors, of shape
+     *            [D0, D1, ..., Daxis(i), ..., Dm].
+     *            Before HAL version 1.2, all input tensors of
+     *            {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *            must have the same scale and zeroPoint as the output tensor.
+     *            Since HAL version 1.2, zero-sized tensors are supported.
+     * * n: An {@link OperandType::INT32} scalar, specifying the
+     *      concatenation axis.
+     *
+     * Outputs:
+     * * 0: The output, a tensor of the same {@link OperandType} as the input
+     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+     *      Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint values can be different from those of the
+     *      input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+     */
+    CONCATENATION = @1.2::OperationType:CONCATENATION,
+
+    /**
+     * Performs a 2-D convolution operation.
+     *
+     * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
+     * batch of images, applying the filter to each window of each image of the
+     * appropriate size.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, i, j, channel] =
+     *         sum_{di, dj, k} (
+     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+     *             filter[channel, di, dj, k]
+     *         ) + bias[channel]
+     *
+     * Supported tensor {@link OperandType} configurations:
+     * * 32 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+     *
+     * * Quantized:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+     * * * input.scale * filter.scale).
+     *
+     * Available since HAL version 1.2:
+     * * 16 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+     *
+     * * Quantized with symmetric per channel quantization for the filter:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, with the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_in], specifying the
+     *      filter.
+     *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+     *      the channel dimension (SymmPerChannelQuantParams::channelDim)
+     *      must be set to 0.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
+     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+     *      of 0 and bias_scale == input_scale * filter_scale.
+     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
+     *      bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 10: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on width dimension. If this input is set,
+     *      input 12 (dilation factor for height) must be specified as well.
+     *      Available since HAL version 1.2.
+     * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on height dimension. If this input is set,
+     *      input 11 (dilation factor for width) must be specified as well.
+     *      Available since HAL version 1.2.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_in], specifying the
+     *      filter.
+     *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+     *      the channel dimension (SymmPerChannelQuantParams::channelDim)
+     *      must be set to 0.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
+     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+     *      of 0 and bias_scale == input_scale * filter_scale.
+     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
+     *      bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, which has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 7: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on width dimension. If this input is set,
+     *      input 9 (dilation factor for height) must be specified as well.
+     *      Available since HAL version 1.2.
+     * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on height dimension. If this input is set,
+     *      input 8 (dilation factor for width) must be specified as well.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth_out].
+     *      Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the following condition must be satisfied: output_scale > input_scale * filter_scale
+     */
+    CONV_2D = @1.2::OperationType:CONV_2D,
+
+    /**
+     * Performs a depthwise 2-D convolution operation.
+     *
+     * Given an input tensor of shape [batches, height, width, depth_in] and a
+     * filter tensor of shape [1, filter_height, filter_width, depth_out]
+     * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
+     * applies a different filter to each input channel (expanding from 1
+     * channel to channel_multiplier channels for each), then concatenates the
+     * results together.
+     *
+     * The output has depth_out = depth_in * depth_multiplier channels.
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, i, j, k * channel_multiplier + q] =
+     *         sum_{di, dj} (
+     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+     *             filter[1, di, dj, k * channel_multiplier + q]
+     *         ) + bias[k * channel_multiplier + q]
+     *
+     * Supported tensor {@link OperandType} configurations:
+     * * 32 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+     *
+     * * Quantized:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+     * * * input.scale * filter.scale).
+     *
+     * Available since HAL version 1.2:
+     * * 16 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+     *
+     * * Quantized with symmetric per channel quantization for the filter:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, with the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+     *      specifying the filter.
+     *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+     *      the channel dimension (SymmPerChannelQuantParams::channelDim)
+     *      must be set to 3.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
+     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+     *      of 0 and bias_scale == input_scale * filter_scale.
+     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
+     *      bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
+     *      multiplier.
+     * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+     *       {@link FusedActivationFunc} values. Specifies the activation to
+     *       invoke on the result.
+     * * 11: An optional {@link OperandType::BOOL} scalar, defaulting to false.
+     *       Set to true to specify NCHW data layout for input0 and output0.
+     *       Available since HAL version 1.2.
+     * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on width dimension. If this input is set,
+     *      input 13 (dilation factor for height) must be specified as well.
+     *      Available since HAL version 1.2.
+     * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on height dimension. If this input is set,
+     *      input 12 (dilation factor for width) must be specified as well.
+     *      Available since HAL version 1.2.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+     *      specifying the filter.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
+     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+     *      of 0 and bias_scale == input_scale * filter_scale.
+     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
+     *      bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
+     *      multiplier.
+     * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 8: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on width dimension. If this input is set,
+     *      input 10 (dilation factor for height) must be specified as well.
+     *      Available since HAL version 1.2.
+     * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
+     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+     *      cells between each filter element on height dimension. If this input is set,
+     *      input 9 (dilation factor for width) must be specified as well.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
+     *      output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the following condition must be satisfied:
+     *      output_scale > input_scale * filter_scale
+     */
+    DEPTHWISE_CONV_2D = @1.2::OperationType:DEPTHWISE_CONV_2D,
+
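Illustrative sketch (not part of the HAL text): a minimal NumPy reference for the NHWC depthwise convolution described above, with explicit padding, strides, and the depthwise multiplier. The helper name and argument layout are assumptions, and the fused activation is omitted.

    import numpy as np

    def depthwise_conv2d_nhwc(x, w, bias, stride, pad, multiplier):
        # x: [N, H, W, Cin]; w: [1, KH, KW, Cin * multiplier]; bias: [Cin * multiplier]
        # stride: (stride_h, stride_w); pad: (left, right, top, bottom)
        n, h, wd, cin = x.shape
        _, kh, kw, cout = w.shape
        assert cout == cin * multiplier
        pl, pr, pt, pb = pad
        xp = np.pad(x, ((0, 0), (pt, pb), (pl, pr), (0, 0)))
        oh = (h + pt + pb - kh) // stride[0] + 1
        ow = (wd + pl + pr - kw) // stride[1] + 1
        out = np.zeros((n, oh, ow, cout), x.dtype)
        for i in range(oh):
            for j in range(ow):
                patch = xp[:, i * stride[0]:i * stride[0] + kh,
                           j * stride[1]:j * stride[1] + kw, :]
                for m in range(multiplier):
                    # Output channel c * multiplier + m reads only input channel c.
                    out[:, i, j, m::multiplier] = np.einsum(
                        'nhwc,hwc->nc', patch, w[0, :, :, m::multiplier])
        return out + bias
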
+    /**
+     * Rearranges data from depth into blocks of spatial data.
+     *
+     * More specifically, this op outputs a copy of the input tensor where
+     * values from the depth dimension are moved in spatial blocks to the height
+     * and width dimensions. The value block_size indicates the input block size
+     * and how the data is moved.
+     *
+     * Chunks of data of size block_size * block_size from depth are rearranged
+     * into non-overlapping blocks of size block_size x block_size.
+     *
+     * The width of the output tensor is input_width * block_size, whereas the
+     * height is input_height * block_size. The depth of the input tensor must
+     * be divisible by block_size * block_size.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+     *      block_size must be >=1 and block_size * block_size must be a divisor
+     *      of the input depth.
+     * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batch, height*block_size,
+     *      width*block_size, depth/(block_size*block_size)].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    DEPTH_TO_SPACE = @1.2::OperationType:DEPTH_TO_SPACE,
+
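Illustrative sketch (not part of the HAL text): the NHWC rearrangement above is a pure reshape/transpose pair; helper name assumed.

    import numpy as np

    def depth_to_space_nhwc(x, block_size):
        n, h, w, c = x.shape
        assert c % (block_size * block_size) == 0
        d = c // (block_size * block_size)
        x = x.reshape(n, h, w, block_size, block_size, d)
        x = x.transpose(0, 1, 3, 2, 4, 5)  # interleave the blocks into H and W
        return x.reshape(n, h * block_size, w * block_size, d)
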
+    /**
+     * Dequantizes the input tensor.
+     *
+     * The formula is:
+     *
+     *     output = (input - zeroPoint) * scale.
+     *
+     * Supported input tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
+     *
+     * Supported output tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     *
+     * Outputs:
+     * * 0: A tensor with the same shape as input0.
+     */
+    DEQUANTIZE = @1.2::OperationType:DEQUANTIZE,
+
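Illustrative sketch (not part of the HAL text): the dequantization formula above in NumPy; helper name assumed.

    import numpy as np

    def dequantize(q, scale, zero_point):
        # output = (input - zeroPoint) * scale
        return (q.astype(np.float32) - np.float32(zero_point)) * np.float32(scale)
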
+    /**
+     * Looks up sub-tensors in the input tensor.
+     *
+     * This operator takes for input a tensor of values (Values) and
+     * a one-dimensional tensor of selection indices (Lookups).
+     * The output tensor is the concatenation of sub-tensors of Values as
+     * selected by Lookups.
+     *
+     * Think of Values as being sliced along its first dimension:
+     * The entries in Lookups select which slices are concatenated together
+     * to create the output tensor.
+     *
+     * For example, if Values has shape of [40, 200, 300] and
+     * Lookups has shape of [3], all three values found in Lookups are
+     * expected to be between 0 and 39. The resulting tensor must
+     * have shape of [3, 200, 300].
+     *
+     * If a value in Lookups is out of bounds, the operation must fail
+     * and an error must be reported.
+     *
+     * Supported value tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported value tensor rank: from 2
+     *
+     * Inputs:
+     * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
+     *      The values are indices into the first dimension of Values.
+     * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+     *      extracted.
+     *
+     * Output:
+     * * 0: An n-D tensor with the same rank and shape as the Values
+     *      tensor, except for the first dimension which has the same size
+     *      as Lookups' only dimension.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input1.
+     */
+    EMBEDDING_LOOKUP = @1.2::OperationType:EMBEDDING_LOOKUP,
+
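Illustrative sketch (not part of the HAL text): the lookup semantics above, including the mandatory bounds check; helper name assumed.

    import numpy as np

    def embedding_lookup(lookups, values):
        # lookups: 1-D int32 indices into the first dimension of values.
        if lookups.min() < 0 or lookups.max() >= values.shape[0]:
            raise ValueError("lookup index out of bounds")  # the op must fail
        return values[lookups]  # shape: [len(lookups), *values.shape[1:]]
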
+    /**
+     * Computes element-wise floor() on the input tensor.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor, of the same {@link OperandType} and dimensions as
+     *      the input tensor.
+     */
+    FLOOR = @1.2::OperationType:FLOOR,
+
+    /**
+     * Denotes a fully (densely) connected layer, which connects all elements
+     * in the input tensor with each element in the output tensor.
+     *
+     * This layer implements the operation:
+     *
+     *     outputs = activation(inputs * weights’ + bias)
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor of at least rank 2, specifying the input. If rank is
+     *      greater than 2, then it gets flattened to a 2-D tensor. The
+     *      (flattened) 2-D tensor is reshaped (if necessary) to
+     *      [batch_size, input_size], where "input_size" corresponds to the
+     *      number of inputs to the layer, matching the second dimension of
+     *      weights, and "batch_size" is calculated by dividing the number of
+     *      elements by "input_size".
+     *      Since HAL version 1.2, zero batch_size is supported for this tensor.
+     * * 1: A 2-D tensor, specifying the weights, of shape
+     *      [num_units, input_size], where "num_units" corresponds to the number
+     *      of output nodes.
+     * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
+     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale.
+     * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     *
+     * Outputs:
+     * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
+     *      output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+     *      condition must be satisfied: output_scale > input_scale * filter_scale.
+     */
+    FULLY_CONNECTED = @1.2::OperationType:FULLY_CONNECTED,
+
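Illustrative sketch (not part of the HAL text): the flattening rule and matrix product described above; helper name assumed, fused activation passed as a callable.

    import numpy as np

    def fully_connected(x, weights, bias, act=lambda v: v):
        # weights: [num_units, input_size]; x is reshaped to
        # [batch_size, input_size], where batch_size = x.size // input_size.
        x2 = x.reshape(-1, weights.shape[1])
        return act(x2 @ weights.T + bias)  # [batch_size, num_units]
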
+    /**
+     * Looks up sub-tensors in the input tensor using a key-value map.
+     *
+     * This operator takes for input a tensor of values (Values),
+     * a one-dimensional tensor of selection values (Lookups) and
+     * a one-dimensional tensor that maps these values to Values
+     * indexes. The output tensor is the concatenation of sub-tensors of
+     * Values as selected by Lookups via Keys.
+     *
+     * Think of Values as being sliced along its outer-most dimension.
+     * The output is a concatenation of selected slices, with one slice
+     * for each entry of Lookups. The slice selected is the one at the
+     * same index as the Maps entry that matches the value in Lookups.
+     *
+     * For a hit, the corresponding sub-tensor of Values is included
+     * in the Output tensor. For a miss, the corresponding sub-tensor in
+     * Output must have zero values.
+     *
+     * For example, if Values has shape of [40, 200, 300],
+     * Keys should have a shape of [40]. If Lookups tensor has shape
+     * of [3], three slices are being concatenated, so the resulting tensor
+     * must have the shape of [3, 200, 300]. If the first entry in Lookups
+     * has the value 123456, that value must be located in Keys tensor.
+     * If the sixth entry of Keys contains 123456, the sixth slice of Values
+     * must be selected. If no entry in Keys has 123456, a slice of zeroes
+     * must be concatenated.
+     *
+     * Supported value tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported value tensor rank: from 2
+     *
+     * Inputs:
+     * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
+     *      shape [ k ].
+     * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
+     *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
+     *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
+     *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
+     *      ascending order.
+     * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
+     *      must be n.
+     *
+     * Outputs:
+     * * 0: Output. A tensor with shape [ k, … ].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input2.
+     * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
+     *      hits (True) or not (False).
+     *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
+     *      and scale 1.0f.
+     *      A non-zero byte represents True, a hit. A zero indicates otherwise.
+     */
+    HASHTABLE_LOOKUP = @1.2::OperationType:HASHTABLE_LOOKUP,
+
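Illustrative sketch (not part of the HAL text): because Keys must be sorted in ascending order, the lookup can be a binary search; np.searchsorted is an illustrative choice, and the helper name is assumed.

    import numpy as np

    def hashtable_lookup(lookups, keys, values):
        idx = np.searchsorted(keys, lookups)
        idx = np.minimum(idx, len(keys) - 1)
        hits = keys[idx] == lookups
        out = values[idx].copy()
        out[~hits] = 0                      # a miss yields a zero sub-tensor
        return out, hits.astype(np.uint8)   # Hits as bytes: non-zero means hit
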
+    /**
+     * Applies L2 normalization along the depth dimension.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[batch, row, col, channel] =
+     *         input[batch, row, col, channel] /
+     *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
+     *
+     * For input tensor with rank less than 4, independently normalizes each
+     * 1-D slice along dimension dim.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+     *
+     * Supported tensor rank: up to 4
+     * Tensors with rank less than 4 are only supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be normalized.
+     * * 1: An optional {@link OperandType::INT32} scalar, default to -1,
+     *      specifying the dimension normalization would be performed on.
+     *      Negative index is used to specify axis from the end (e.g. -1 for
+     *      the last axis). Must be in the range [-n, n).
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the scale must be 1.f / 128 and the zeroPoint must be 128.
+     */
+    L2_NORMALIZATION = @1.2::OperationType:L2_NORMALIZATION,
+
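Illustrative sketch (not part of the HAL text): the normalization formula above for an arbitrary axis; helper name assumed.

    import numpy as np

    def l2_normalization(x, axis=-1):
        denom = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True))
        return x / denom
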
+    /**
+     * Performs a 2-D L2 pooling operation.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, i, j, c] =
+     *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
+     *              sum(1))
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+     *       Set to true to specify NCHW data layout for input0 and output0.
+     *       Available since HAL version 1.2.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth].
+     */
+    L2_POOL_2D = @1.2::OperationType:L2_POOL_2D,
+
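Illustrative sketch (not part of the HAL text): a direct, unoptimized rendering of the pooling formula above, i.e. the square root of the mean of squares over each window; helper name assumed.

    import numpy as np

    def l2_pool_2d_nhwc(x, filter_hw, stride_hw, pad):
        # pad: (left, right, top, bottom)
        n, h, w, c = x.shape
        fh, fw = filter_hw
        sh, sw = stride_hw
        pl, pr, pt, pb = pad
        xp = np.pad(x, ((0, 0), (pt, pb), (pl, pr), (0, 0)))
        oh = (h + pt + pb - fh) // sh + 1
        ow = (w + pl + pr - fw) // sw + 1
        out = np.zeros((n, oh, ow, c), x.dtype)
        for i in range(oh):
            for j in range(ow):
                win = xp[:, i * sh:i * sh + fh, j * sw:j * sw + fw, :]
                out[:, i, j, :] = np.sqrt(np.mean(np.square(win), axis=(1, 2)))
        return out
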
+    /**
+     * Applies Local Response Normalization along the depth dimension.
+     *
+     * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
+     * last dimension), and each vector is normalized independently. Within a
+     * given vector, each component is divided by the weighted, squared sum of
+     * inputs within depth_radius.
+     *
+     * The output is calculated using this formula:
+     *
+     *     sqr_sum[a, b, c, d] = sum(
+     *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
+     *     output = input / pow((bias + alpha * sqr_sum), beta)
+     *
+     * For input tensor with rank less than 4, independently normalizes each
+     * 1-D slice along the specified dimension.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     * Tensors with rank less than 4 are only supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
+     *      the normalization window.
+     * * 2: A scalar, specifying the bias, must not be zero.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias
+     *      value must be of {@link OperandType::FLOAT16}.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
+     *      value must be of {@link OperandType::FLOAT32}.
+     * * 3: A scalar, specifying the scale factor, alpha.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+     *      alpha value must be of {@link OperandType::FLOAT16}.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+     *      alpha value must be of {@link OperandType::FLOAT32}.
+     * * 4: A scalar, specifying the exponent, beta.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+     *      value must be of {@link OperandType::FLOAT16}.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+     *      value must be of {@link OperandType::FLOAT32}.
+     * * 5: An optional {@link OperandType::INT32} scalar, default to -1,
+     *      specifying the dimension normalization would be performed on.
+     *      Negative index is used to specify axis from the end (e.g. -1 for
+     *      the last axis). Must be in the range [-n, n).
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    LOCAL_RESPONSE_NORMALIZATION = @1.2::OperationType:LOCAL_RESPONSE_NORMALIZATION,
+
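Illustrative sketch (not part of the HAL text): the sqr_sum/output formulas above along a chosen axis, with the window clipped at the tensor edges; helper name assumed.

    import numpy as np

    def local_response_normalization(x, radius, bias, alpha, beta, axis=-1):
        x = np.moveaxis(x, axis, -1)
        sqr = np.square(x)
        out = np.empty_like(x)
        depth = x.shape[-1]
        for d in range(depth):
            lo, hi = max(0, d - radius), min(depth, d + radius + 1)
            sqr_sum = sqr[..., lo:hi].sum(axis=-1)
            out[..., d] = x[..., d] / np.power(bias + alpha * sqr_sum, beta)
        return np.moveaxis(out, -1, axis)
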
+    /**
+     * Computes sigmoid activation on the input tensor element-wise.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output = 1 / (1 + exp(-input))
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
+     */
+    LOGISTIC = @1.2::OperationType:LOGISTIC,
+
+    /**
+     * Projects an input to a bit vector via locality-sensitive hashing.
+     *
+     * Supported input tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported input tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: Hash functions. Dim.size == 2, DataType: Float.
+     *      Tensor[0].Dim[0]: Number of hash functions.
+     *      Tensor[0].Dim[1]: Number of projected output bits generated by each
+     *      hash function.
+     *      If the projection type is Sparse:
+     *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
+     *
+     * * 1: Input. Dim.size >= 1, no restriction on DataType.
+     * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+     *      If not set, each input element is considered to have the same weight
+     *      of 1.0.
+     *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
+     * * 3: Type:
+     *        Sparse:
+     *          Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
+     *          Computed bit vector is considered to be sparse.
+     *          Each output element is an int32 made up of multiple bits
+     *          computed from hash functions.
+     *
+     *          NOTE: To avoid collisions across hash functions, an offset value
+     *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
+     *          where k is the index of the hash function.
+     *
+     *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
+     *          Legacy behavior that does not include the offset value.
+     *
+     *        Dense:
+     *          Value LSHProjectionType_DENSE(=2).
+     *          Computed bit vector is considered to be dense. Each output
+     *          element represents a bit and can take the value of either
+     *          0 or 1.
+     *
+     * Outputs:
+     * * 0: If the projection type is Sparse:
+     *      Output.Dim == { Tensor[0].Dim[0] }
+     *      A tensor of int32 that represents hash signatures.
+     *
+     *      If the projection type is Dense:
+     *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+     *      A flattened tensor that represents projected bit vectors.
+     * The offset value for sparse projections was added in HAL version 1.2.
+     */
+    LSH_PROJECTION = @1.2::OperationType:LSH_PROJECTION,
+
+    /**
+     * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
+     *
+     * The LSTM operation is described by the following equations.
+     *
+     * \f{eqnarray*}{
+     * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
+     * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
+     * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
+     *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
+     * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
+     *      & & \\
+     *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
+     *      & if\ there\ is\ a\ projection; \\
+     * h_t =& & \\
+     *      & o_t \odot g(C_t) & otherwise. \\
+     * \f}
+     * Where:
+     * * \f$x_t\f$ is the input,
+     * * \f$i_t\f$ is the input gate,
+     * * \f$f_t\f$ is the forget gate,
+     * * \f$C_t\f$ is the cell state,
+     * * \f$o_t\f$ is the output,
+     * * \f$h_t\f$ is the output state,
+     * * \f$\sigma\f$ is the logistic sigmoid function,
+     * * \f$g\f$ is the cell input and cell output activation function, usually
+     *   \f$tanh\f$,
+     * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+     * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
+     * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+     * * \f$b_i\f$ is the input gate bias,
+     * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+     * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+     * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+     * * \f$b_f\f$ is the forget gate bias,
+     * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+     * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+     * * \f$b_c\f$ is the cell bias,
+     * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+     * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+     * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+     * * \f$b_o\f$ is the output gate bias,
+     * * \f$W_{proj}\f$ is the projection weight matrix,
+     * * \f$b_{proj}\f$ is the projection bias,
+     * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+     * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+     * * \f$\odot\f$ is the
+     *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+     *   Hadamard product</a> that takes two matrices and produces another
+     *   matrix, each element of which is the product of the corresponding
+     *   elements of the input matrices.
+     *
+     * Since HAL version 1.2, LSTM supports layer normalization.
+     * In case layer normalization is used, the inputs to internal activation
+     * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
+     * following the approach in section 3.1 of
+     * https://arxiv.org/pdf/1607.06450.pdf
+     *
+     * The operation has the following independently optional inputs:
+     * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+     *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+     *   have values or none of them have values (i.e., all set to null). If
+     *   they have values, the peephole optimization is used.
+     * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
+     *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+     *   or none of them have values. If they have no values, coupling of input
+     *   and forget gates (CIFG) is used, in which case the input gate
+     *   (\f$i_t\f$) is calculated using the following equation instead.
+     *   \f{eqnarray*}{
+     *   i_t = 1 - f_t
+     *   \f}
+     *   In case peephole optimization is used and CIFG is not used
+     *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+     *   cell-to-input weights must have no value.
+     * * The projection weights (\f$W_{proj}\f$) are required only for the
+     *   recurrent projection layer, and should otherwise have no value.
+     * * The projection bias (\f$b_{proj}\f$) may (but need not) have a
+     *   value if the recurrent projection layer exists, and should otherwise
+     *   have no value.
+     * * (HAL version 1.2 or later) The four layer normalization weights either all have
+     *   values or none of them have values. Additionally, if CIFG is used,
+     *   input layer normalization weights tensor is omitted and the other layer
+     *   normalization weights either all have values or none of them have
+     *   values. Layer normalization is used when the values of all the layer
+     *   normalization weights are present.
+     *
+     * References:
+     *
+     * The default non-peephole non-CIFG implementation is based on:
+     * http://www.bioinf.jku.at/publications/older/2604.pdf
+     * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
+     * Computation, 9(8):1735-1780, 1997.
+     *
+     * The peephole implementation and projection layer is based on:
+     * https://research.google.com/pubs/archive/43905.pdf
+     * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
+     * recurrent neural network architectures for large scale acoustic
+     * modeling." INTERSPEECH, 2014.
+     * (However, the concept of peephole optimization was introduced in work
+     * prior to this paper.)
+     *
+     * The coupling of input and forget gate (CIFG) is based on:
+     * http://arxiv.org/pdf/1503.04069.pdf
+     * Greff et al. "LSTM: A Search Space Odyssey"
+     *
+     * The layer normalization is based on:
+     * https://arxiv.org/pdf/1607.06450.pdf
+     * Jimmy Ba et al. "Layer Normalization"
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * All input and output tensors must be of the same type.
+     *
+     * Inputs:
+     * * 0: The input (\f$x_t\f$).
+     *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+     *      corresponds to the batching dimension, and “input_size” is the size
+     *      of the input.
+     * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
+     *      corresponds to the number of cell units.
+     * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+     *      A 2-D tensor of shape [num_units, input_size].
+     * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+     *      A 2-D tensor of shape [num_units, input_size].
+     * * 4: The input-to-output weights (\f$W_{xo}\f$).
+     *      A 2-D tensor of shape [num_units, input_size].
+     * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+     *      A 2-D tensor of shape [num_units, output_size], where “output_size”
+     *      corresponds to either the number of cell units (i.e., “num_units”),
+     *      or the second dimension of the “projection_weights”, if defined.
+     * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+     *      A 2-D tensor of shape [num_units, output_size].
+     * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+     *      A 2-D tensor of shape [num_units, output_size].
+     * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+     *      A 2-D tensor of shape [num_units, output_size].
+     * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 10: The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 11: The cell-to-output weights (\f$W_{co}\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 12: The input gate bias (\f$b_i\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 13: The forget gate bias (\f$b_f\f$).
+     *      A 1-D tensor of shape [num_units].
+     * * 14: The cell bias (\f$b_c\f$).
+     *      A 1-D tensor of shape [num_units].
+     * * 15: The output gate bias (\f$b_o\f$).
+     *      A 1-D tensor of shape [num_units].
+     * * 16: The projection weights (\f$W_{proj}\f$). Optional.
+     *      A 2-D tensor of shape [output_size, num_units].
+     * * 17: The projection bias (\f$b_{proj}\f$). Optional.
+     *      A 1-D tensor of shape [output_size].
+     * * 18: The output state (in) (\f$h_{t-1}\f$).
+     *      A 2-D tensor of shape [batch_size, output_size].
+     * * 19: The cell state (in) (\f$C_{t-1}\f$).
+     *      A 2-D tensor of shape [batch_size, num_units].
+     * * 20: The activation function (\f$g\f$).
+     *      A value indicating the activation function:
+     *      <ul>
+     *      <li>0: None;
+     *      <li>1: Relu;
+     *      <li>3: Relu6;
+     *      <li>4: Tanh;
+     *      <li>6: Sigmoid.
+     *      </ul>
+     * * 21: The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+     *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+     *      then clipping is disabled.
+     *      Until HAL version 1.2 this scalar must be of type {@link
+     *      OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+     *      tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+     *      scalar must be of the type {@link OperandType::FLOAT32},
+     *      otherwise if all the input tensors have the type {@link
+     *      OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+     *      OperandType::FLOAT16}.
+     * * 22: The clipping threshold (\f$t_{proj}\f$) for the output from the
+     *      projection layer, such that values are bound within
+     *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+     *      Until HAL version 1.2 this scalar must be of type {@link
+     *      OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+     *      tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+     *      scalar must be of the type {@link OperandType::FLOAT32},
+     *      otherwise if all the input tensors have the type {@link
+     *      OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+     *      OperandType::FLOAT16}.
+     * Since HAL version 1.2 there are additional inputs to this op:
+     * * 23: The input layer normalization weights.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at input gate.
+     * * 24: The forget layer normalization weights.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at forget gate.
+     * * 25: The cell layer normalization weights.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at cell gate.
+     * * 26: The output layer normalization weights.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at output gate.
+     *
+     * Outputs:
+     * * 0: The scratch buffer.
+     *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
+     *      [batch_size, num_units * 4] without CIFG.
+     * * 1: The output state (out) (\f$h_t\f$).
+     *      A 2-D tensor of shape [batch_size, output_size].
+     * * 2: The cell state (out) (\f$C_t\f$).
+     *      A 2-D tensor of shape [batch_size, num_units].
+     * * 3: The output (\f$o_t\f$).
+     *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
+     *      the same as the current “output state (out)” value.
+     */
+    LSTM = @1.2::OperationType:LSTM,
+
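Illustrative sketch (not part of the HAL text): a single LSTM step covering only the default path (no CIFG, no peephole, no projection, no layer normalization). The dicts keyed 'i', 'f', 'c', 'o' are an assumed layout mirroring inputs 1-15 above.

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def lstm_step(x, h_prev, c_prev, Wx, Wh, b, cell_clip=0.0, g=np.tanh):
        i = sigmoid(x @ Wx['i'].T + h_prev @ Wh['i'].T + b['i'])
        f = sigmoid(x @ Wx['f'].T + h_prev @ Wh['f'].T + b['f'])
        c = f * c_prev + i * g(x @ Wx['c'].T + h_prev @ Wh['c'].T + b['c'])
        if cell_clip > 0.0:
            c = np.clip(c, -cell_clip, cell_clip)  # t_cell
        o = sigmoid(x @ Wx['o'].T + h_prev @ Wh['o'].T + b['o'])
        h = o * g(c)
        return h, c  # output state (out) and cell state (out)
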
+    /**
+     * Performs a 2-D max pooling operation.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, i, j, channel] =
+     *         max_{di, dj} (
+     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+     *         )
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+     *       Set to true to specify NCHW data layout for input0 and output0.
+     *       Available since HAL version 1.2.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    MAX_POOL_2D = @1.2::OperationType:MAX_POOL_2D,
+
+    /**
+     * Multiplies two tensors, element-wise.
+     *
+     * Takes two input tensors of identical {@link OperandType} and compatible
+     * dimensions. The output is the product of both input tensors, optionally
+     * modified by an activation function.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the resulting output is the maximum size along each dimension
+     * of the input operands. It starts with the trailing dimensions, and works
+     * its way forward.
+     *
+     * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of the output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+     *      as input0.
+     * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     *
+     * Outputs:
+     * * 0: The product, a tensor of the same {@link OperandType} as input0.
+     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the following condition must be satisfied:
+     *      output_scale > input1_scale * input2_scale.
+     */
+    MUL = @1.2::OperationType:MUL,
+
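Illustrative sketch (not part of the HAL text): the broadcasting rule above, with a fused RELU applied via np.maximum.

    import numpy as np

    a = np.ones((2, 1, 4), dtype=np.float32)
    b = np.full((1, 3, 4), 2.0, dtype=np.float32)
    out = np.maximum(a * b, 0.0)  # element-wise product, then RELU
    print(out.shape)              # (2, 3, 4): the max size along each dimension
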
+    /**
+     * Computes rectified linear activation on the input tensor element-wise.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output = max(0, input)
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    RELU = @1.2::OperationType:RELU,
+
+    /**
+     * Computes rectified linear 1 activation on the input tensor element-wise.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output = min(1.f, max(-1.f, input))
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of the same shape as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    RELU1 = @1.2::OperationType:RELU1,
+
+    /**
+     * Computes rectified linear 6 activation on the input tensor element-wise.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output = min(6, max(0, input))
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    RELU6 = @1.2::OperationType:RELU6,
+
+    /**
+     * Reshapes a tensor.
+     *
+     * Given a tensor, this operation returns a tensor that has the same values
+     * as the input, but with a newly specified shape.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the tensor to be reshaped.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
+     *      shape of the output tensor. The number of elements implied by shape
+     *      must be the same as the number of elements in the input tensor.
+     *
+     *      If one component of shape is the special value -1, the size of that
+     *      dimension is computed so that the total size remains constant. In
+     *      particular, a shape of [-1] flattens into 1-D. At most one component
+     *      of shape can be -1.
+     *
+     * Outputs:
+     * * 0: The output tensor, of shape specified by the input shape.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    RESHAPE = @1.2::OperationType:RESHAPE,
+
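Illustrative sketch (not part of the HAL text): the -1 shape-inference rule above.

    import numpy as np

    x = np.arange(24).reshape(2, 3, 4)
    print(x.reshape(4, -1).shape)  # (4, 6): -1 is computed to keep 24 elements
    print(x.reshape(-1).shape)     # (24,): a shape of [-1] flattens into 1-D
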
+    /**
+     * Resizes images to a given size using bilinear interpolation.
+     *
+     * Resized images will be distorted if the output aspect ratio is not the
+     * same as the input aspect ratio. The corner pixels of the output may not
+     * be the same as the corner pixels of the input.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Both resizing by shape and resizing by scale are supported.
+     *
+     * Inputs (resizing by shape):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     *      Since HAL version 1.2, zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the width of
+     *      the output tensor.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the height of
+     *      the output tensor.
+     * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Inputs (resizing by scale, since HAL version 1.2):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input. Zero batches is supported for this tensor.
+     * * 1: A scalar, specifying width_scale, the scaling factor of the width
+     *      dimension from the input tensor to the output tensor. The output
+     *      width is calculated as new_width = floor(width * width_scale).
+     *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
+     *      of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} otherwise.
+     * * 2: A scalar, specifying height_scale, the scaling factor of the height
+     *      dimension from the input tensor to the output tensor. The output
+     *      height is calculated as new_height = floor(height * height_scale).
+     *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
+     *      of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} otherwise.
+     * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, new_height, new_width, depth].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    RESIZE_BILINEAR = @1.2::OperationType:RESIZE_BILINEAR,
+
+    /**
+     * A basic recurrent neural network layer.
+     *
+     * This layer implements the operation:
+     * outputs = state = activation(inputs * input_weights +
+     *                              state * recurrent_weights + bias)
+     *
+     * Where:
+     * * “input_weights” is a weight matrix that multiplies the inputs;
+     * * “recurrent_weights” is a weight matrix that multiplies the current
+     *    “state” which itself is the output from the previous time step
+     *    computation;
+     * * “bias” is a bias vector (added to each output vector in the batch);
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * The input tensors must all be the same type.
+     *
+     * Inputs:
+     * * 0: input.
+     *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+     *      corresponds to the batching dimension, and “input_size” is the size
+     *      of the input.
+     * * 1: weights.
+     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
+     *      corresponds to the number of units.
+     * * 2: recurrent_weights.
+     *      A 2-D tensor of shape [num_units, num_units], with columns
+     *      corresponding to the weights from each unit.
+     * * 3: bias.
+     *      A 1-D tensor of shape [num_units].
+     * * 4: hidden state (in).
+     *      A 2-D tensor of shape [batch_size, num_units].
+     * * 5: fused_activation_function.
+     *      An optional {@link FusedActivationFunc} value indicating the
+     *      activation function. If “NONE” is specified then it results in a
+     *      linear activation.
+     *
+     * Outputs:
+     * * 0: hidden state (out).
+     *      A 2-D tensor of shape [batch_size, num_units].
+     *
+     * * 1: output.
+     *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
+     *      the same as the current state value.
+     */
+    RNN = @1.2::OperationType:RNN,
+
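Illustrative sketch (not part of the HAL text): the single-step recurrence above; helper name assumed.

    import numpy as np

    def rnn_step(x, h_prev, weights, recurrent_weights, bias, act=np.tanh):
        h = act(x @ weights.T + h_prev @ recurrent_weights.T + bias)
        return h, h  # hidden state (out) and output carry the same value
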
+    /**
+     * Computes the softmax activation on the input tensor element-wise, per
+     * batch, by normalizing the input vector so the maximum coefficient is
+     * zero.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output[batch, i] =
+     *         exp((input[batch, i] - max(input[batch, :])) * beta) /
+     *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
+     *
+     * For input tensor with rank other than 2, the activation will be applied
+     * independently on each 1-D slice along the specified dimension.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4.
+     * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: A 2-D or 4-D tensor, specifying the tensor to be normalized.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     * * 1: A scalar, specifying the positive scaling factor for the exponent,
+     *      beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
+     *      {@link OperandType::FLOAT32}.
+     *      If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
+     *      scalar must be of {@link OperandType::FLOAT16}.
+     * * 2: An optional {@link OperandType::INT32} scalar, default to -1,
+     *      specifying the dimension the activation would be performed on.
+     *      Negative index is used to specify axis from the end (e.g. -1 for
+     *      the last axis). Must be in the range [-n, n).
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
+     */
+    SOFTMAX = @1.2::OperationType:SOFTMAX,
+
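Illustrative sketch (not part of the HAL text): the numerically stabilized formula above, including the beta scaling; helper name assumed.

    import numpy as np

    def softmax(x, beta=1.0, axis=-1):
        z = (x - x.max(axis=axis, keepdims=True)) * beta
        e = np.exp(z)
        return e / e.sum(axis=axis, keepdims=True)
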
+    /**
+     * Rearranges blocks of spatial data into depth.
+     *
+     * More specifically, this op outputs a copy of the input tensor where
+     * values from the height and width dimensions are moved to the depth
+     * dimension. The value block_size indicates the input block size and how
+     * the data is moved.
+     *
+     * Non-overlapping blocks of size block_size x block_size from the height
+     * and width dimensions are rearranged into chunks of depth of size
+     * block_size * block_size.
+     *
+     * The depth of the output tensor is input_depth * block_size * block_size.
+     * The input tensor's height and width must be divisible by block_size.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+     *      block_size must be >=1 and block_size must be a divisor of both the
+     *      input height and width.
+     * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batches, height/block_size,
+     *      width/block_size, depth_in*block_size*block_size].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    SPACE_TO_DEPTH = @1.2::OperationType:SPACE_TO_DEPTH,
+
+    /**
+     * SVDF op is a kind of stateful layer derived from the notion that a
+     * densely connected layer that's processing a sequence of input frames can
+     * be approximated by using a singular value decomposition of each of its
+     * nodes. The implementation is based on:
+     *
+     * https://research.google.com/pubs/archive/43813.pdf
+     *
+     * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
+     * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
+     * INTERSPEECH, 2015.
+     *
+     * It processes the incoming input using a 2-stage filtering mechanism:
+     * * stage 1 performs filtering on the "features" dimension, whose outputs
+     *   are pushed into a fixed-size memory of size memory_size;
+     * * stage 2 performs filtering on the "time" dimension over the
+     *   memory_size memoized outputs of stage 1.
+     *
+     * Specifically, for rank 1, this layer implements the operation:
+     *
+     *     memory = push(conv1d(inputs, weights_feature, feature_dim,
+     *                          "PADDING_VALID"));
+     *     outputs = activation(memory * weights_time + bias);
+     *
+     * Where:
+     * * “weights_feature” is a weights matrix that processes the inputs (by
+     *   convolving the input with every “feature filter”), and whose outputs
+     *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
+     *   entry gets dropped);
+     * * “weights_time” is a weights matrix that processes the “memory” (by a
+     *   batched matrix multiplication on the num_units);
+     * * “bias” is an optional bias vector (added to each output vector in the
+     *   batch); and
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+     *
+     * Each rank adds a dimension to the weights matrices by means of stacking
+     * the filters.
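+     *
+     * For example, with rank = 1, num_units = 4, memory_size = 10 and
+     * input_size = 3 (illustrative sizes), the operand shapes would be:
+     *
+     *     input:           [batch_size, 3]
+     *     weights_feature: [4, 3]
+     *     weights_time:    [4, 10]
+     *     state:           [batch_size, 36]
+     *     output:          [batch_size, 4]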
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * All input tensors must be the same type.
+     *
+     * Inputs:
+     * * 0: input.
+     *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+     *      corresponds to the batching dimension, and “input_size” is the size
+     *      of the input.
+     * * 1: weights_feature.
+     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
+     *      corresponds to the number of units.
+     * * 2: weights_time.
+     *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
+     *      corresponds to the fixed-size of the memory.
+     * * 3: bias.
+     *      An optional 1-D tensor of shape [num_units].
+     * * 4: state (in).
+     *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
+     * * 5: rank.
+     *      The rank of the SVD approximation.
+     * * 6: fused_activation_function.
+     *      An optional {@link FusedActivationFunc} value indicating the
+     *      activation function. If “NONE” is specified then it results in a
+     *      linear activation.
+     *
+     * Outputs:
+     * * 0: state (out).
+     *      A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+     *      [batch_size, (memory_size - 1) * num_units * rank].
+     * * 1: output.
+     *      A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+     *      [batch_size, num_units].
+     */
+    SVDF = @1.2::OperationType:SVDF,
+
+    /**
+     * Computes hyperbolic tangent of input tensor element-wise.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output = tanh(input)
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+     *
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the scale must be 1.f / 128 and the zeroPoint must be 128.
+     */
+    TANH = @1.2::OperationType:TANH,
+
+    /**
+     * BatchToSpace for N-dimensional tensors.
+     *
+     * This operation reshapes the batch dimension (dimension 0) into M + 1
+     * dimensions of shape block_shape + [batch], interleaves these blocks back
+     * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
+     * result with the same rank as the input.
+     *
+     * This is the reverse of SpaceToBatch.
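+     *
+     * For example, with block sizes [2, 2] (illustrative values):
+     *
+     *     input:  shape [4, 1, 1, 1], values [1, 2, 3, 4]
+     *     output: shape [1, 2, 2, 1], values [[1, 2], [3, 4]]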
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be reshaped
+     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+     *      sizes for each spatial dimension of the input tensor. All values
+     *      must be >= 1.
+     * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    BATCH_TO_SPACE_ND = @1.2::OperationType:BATCH_TO_SPACE_ND,
+
+    /**
+     * Element-wise division of two tensors.
+     *
+     * Takes two input tensors of identical {@link OperandType} and compatible
+     * dimensions. The output is the result of dividing the first input tensor
+     * by the second, optionally modified by an activation function.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its way
+     * forward.
+     *
+     * Example:
+     *     input1.dimension =    {4, 1, 2}
+     *     input2.dimension = {5, 4, 3, 1}
+     *     output.dimension = {5, 4, 3, 2}
+     *
+     * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of the output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the first input.
+     * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+     *      as input0.
+     * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     */
+    DIV = @1.2::OperationType:DIV,
+
+    /**
+     * Computes the mean of elements across dimensions of a tensor.
+     *
+     * Reduces the input tensor along the given dimensions to reduce. Unless
+     * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
+     * in axis. If keep_dims is true, the reduced dimensions are retained with
+     * length 1.
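+     *
+     * For example, reducing over axis 0 (illustrative values):
+     *
+     *     input (shape {2, 2}): [[1.0, 2.0], [3.0, 4.0]]
+     *     axis: [0], keep_dims: 0
+     *     output (shape {2}): [2.0, 3.0]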
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Must be in the range
+     *      [-rank(input_tensor), rank(input_tensor)).
+     *
+     *      NOTE: When the operation was introduced, the documentation
+     *      incorrectly stated that if dimensions were empty, the operation
+     *      would reduce across all dimensions. This behavior was never
+     *      implemented.
+     *
+     * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be same as input0.
+     */
+    MEAN = @1.2::OperationType:MEAN,
+
+    /**
+     * Pads a tensor.
+     *
+     * This operation pads a tensor according to the specified paddings.
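+     *
+     * For example, padding a tensor of shape {2, 3} with paddings
+     * [[0, 1], [2, 0]] (illustrative values) produces an output of shape
+     * {3, 5}:
+     *
+     *     output0.dimension[0] = 0 + 2 + 1 = 3
+     *     output0.dimension[1] = 2 + 3 + 0 = 5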
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *   (full support since HAL version 1.2, see the output section)
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be padded.
+     * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+     *      for each spatial dimension of the input tensor. The shape of the
+     *      tensor must be {rank(input0), 2}.
+     *      padding[i, 0] specifies the number of elements to be padded in the
+     *      front of dimension i.
+     *      padding[i, 1] specifies the number of elements to be padded after the
+     *      end of dimension i.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0. The
+     *      output tensor has the same rank as input0, and each
+     *      dimension of the output tensor has the same size as the
+     *      corresponding dimension of the input tensor plus the size
+     *      of the padding:
+     *          output0.dimension[i] =
+     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     *
+     *      NOTE: Before HAL version 1.2, the pad value for
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+     *      Since HAL version 1.2, the pad value is always the logical zero.
+     */
+    PAD = @1.2::OperationType:PAD,
+
+    /**
+     * SpaceToBatch for N-Dimensional tensors.
+     *
+     * This operation divides "spatial" dimensions [1, ..., M] of the input into
+     * a grid of blocks of shape block_shape, and interleaves these blocks with
+     * the "batch" dimension (0) such that in the output, the spatial dimensions
+     * [1, ..., M] correspond to the position within the grid, and the batch
+     * dimension combines both the position within a spatial block and the
+     * original batch position. Prior to division into blocks, the spatial
+     * dimensions of the input are optionally zero padded according to paddings.
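+     *
+     * For example, with block sizes [2, 2] and zero paddings
+     * (illustrative values):
+     *
+     *     input:  shape [1, 2, 2, 1], values [[1, 2], [3, 4]]
+     *     output: shape [4, 1, 1, 1], values [1, 2, 3, 4]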
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *   (full support since HAL version 1.2, see the output section)
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     * NCHW is supported since HAL version 1.2.
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the input.
+     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+     *      sizes for each spatial dimension of the input tensor. All values
+     *      must be >= 1.
+     * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+     *      for each spatial dimension of the input tensor. All values must be
+     *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
+     *      of spatial dimensions.
+     *      padding[i, 0] specifies the number of elements to be padded in the
+     *      front of dimension i.
+     *      padding[i, 1] specifies the number of elements to be padded after the
+     *      end of dimension i.
+     * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *      Available since HAL version 1.2.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     *
+     *      NOTE: Before HAL version 1.2, the pad value for
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+     *      Since HAL version 1.2, the pad value is always the logical zero.
+     */
+    SPACE_TO_BATCH_ND = @1.2::OperationType:SPACE_TO_BATCH_ND,
+
+    /**
+     * Removes dimensions of size 1 from the shape of a tensor.
+     *
+     * Given a tensor input, this operation returns a tensor of the same
+     * {@link OperandType} with all dimensions of size 1 removed. If you don't
+     * want to remove all size 1 dimensions, you can remove specific size 1
+     * dimensions by specifying the axes (input1).
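+     *
+     * For example, squeezing a tensor of shape {1, 2, 1, 3}
+     * (illustrative shape):
+     *
+     *     axes omitted: output shape {2, 3}
+     *     axes = [0]:   output shape {2, 1, 3}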
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, the tensor to be squeezed.
+     * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+     *      dimensions to squeeze. If specified only squeezes the dimensions
+     *      listed. Otherwise, squeezes all dimensions. The dimension index
+     *      starts at 0. An error must be reported if squeezing a dimension that
+     *      is not 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0. Contains the
+     *      same data as input, but has one or more dimensions of size 1
+     *      removed.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    SQUEEZE = @1.2::OperationType:SQUEEZE,
+
+    /**
+     * Extracts a strided slice of a tensor.
+     *
+     * Roughly speaking, this op extracts a slice of size (end - begin) / stride
+     * from the given input tensor. Starting at the location specified by begin,
+     * the slice continues by adding stride to the index until the index in
+     * every dimension is no longer less than end. Note that a stride can be
+     * negative, which causes a reverse slice.
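+     *
+     * For example, slicing a 1-D tensor (illustrative values):
+     *
+     *     input0  = [1, 2, 3, 4, 5, 6]
+     *     begin = [1], end = [5], strides = [2]
+     *     output0 = [2, 4]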
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be sliced.
+     * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+     *      starts of the dimensions of the input tensor to be sliced. The
+     *      length must be of rank(input0).
+     * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+     *      ends of the dimensions of the input tensor to be sliced. The length
+     *      must be of rank(input0).
+     * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+     *      strides of the dimensions of the input tensor to be sliced. The
+     *      length must be of rank(input0). The entries must be non-zero.
+     * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
+     *      of begin_mask is set, begin[i] is ignored and the fullest possible
+     *      range in that dimension is used instead.
+     * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
+     *      end_mask is set, end[i] is ignored and the fullest possible range in
+     *      that dimension is used instead.
+     * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
+     *      ith bit of shrink_axis_mask is set, the ith dimension specification
+     *      shrinks the dimensionality by 1, taking on the value at index
+     *      begin[i]. In this case, the ith specification must define a
+     *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
+     *      where k is the number of bits set in shrink_axis_mask.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    STRIDED_SLICE = @1.2::OperationType:STRIDED_SLICE,
+
+    /**
+     * Element-wise subtraction of two tensors.
+     *
+     * Takes two input tensors of identical {@link OperandType} and compatible
+     * dimensions. The output is the result of subtracting the second input
+     * tensor from the first one, optionally modified by an activation function.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its way
+     * forward.
+     *
+     * Example:
+     *     input1.dimension =    {4, 1, 2}
+     *     input2.dimension = {5, 4, 3, 1}
+     *     output.dimension = {5, 4, 3, 2}
+     *
+     * Since HAL version 1.2, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of the output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the first input.
+     * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+     *      as input0.
+     * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    SUB = @1.2::OperationType:SUB,
+
+    /**
+     * Transposes the input tensor, permuting the dimensions according to the
+     * perm tensor.
+     *
+     * The returned tensor's dimension i corresponds to the input dimension
+     * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
+     * rank of the input tensor. Hence by default, this operation performs a
+     * regular matrix transpose on 2-D input Tensors.
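+     *
+     * For example, with perm = [1, 0] (illustrative values):
+     *
+     *     input  (shape {2, 3}): [[1, 2, 3], [4, 5, 6]]
+     *     output (shape {3, 2}): [[1, 4], [2, 5], [3, 6]]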
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be transposed.
+     *      Since HAL version 1.2, this tensor may be zero-sized.
+     * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
+     *      the permutation of the dimensions of the input tensor.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    TRANSPOSE = @1.2::OperationType:TRANSPOSE,
+
+    /**
+     * Computes the absolute value of a tensor, element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    ABS = @1.2::OperationType:ABS,
+
+    /**
+     * Returns the index of the largest element along an axis.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor specifying the input. Must be non-empty.
+     * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+     *      reduce across. Negative index is used to specify axis from the
+     *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
+     *
+     * Outputs:
+     * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
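+     *
+     * For example, with axis = 1 (illustrative values):
+     *
+     *     input0  (shape {2, 2}): [[1, 4], [3, 2]]
+     *     output0 (shape {2}):    [1, 0]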
+     */
+    // The name ARGMAX (rather than ARG_MAX) has no underscore to avoid a name
+    // conflict with the ARG_MAX macro defined in libc/kernel/uapi/linux/limits.h.
+    ARGMAX = @1.2::OperationType:ARGMAX,
+
+    /**
+     * Returns the index of the smallest element along an axis.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor specifying the input. Must be non-empty.
+     * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+     *      reduce across. Negative index is used to specify axis from the
+     *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
+     *
+     * Outputs:
+     * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+     */
+    ARGMIN = @1.2::OperationType:ARGMIN,  // See ARGMAX for naming discussion.
+
+    /**
+     * Transform axis-aligned bounding box proposals using bounding box deltas.
+     *
+     * Given the positions of bounding box proposals and the corresponding
+     * bounding box deltas for each class, return the refined bounding box
+     * regions. The resulting bounding boxes are clipped against the edges of
+     * the image.
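+     *
+     * Conceptually, the refinement applies the standard box-regression
+     * transform for each class (a sketch; the exact delta encoding is
+     * described in the input descriptions below):
+     *
+     *     w = x2 - x1, h = y2 - y1
+     *     center_x' = (x1 + x2) / 2 + dx * w
+     *     center_y' = (y1 + y2) / 2 + dy * h
+     *     w' = w * exp(dw), h' = h * exp(dh)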
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT16_ASYMM}
+     *
+     * Inputs:
+     * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
+     *      bounding box proposals, each line with format [x1, y1, x2, y2].
+     *      For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
+     *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+     *      is supported for this tensor.
+     * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
+     *      bounding box delta for each region of interest and each class. The
+     *      bounding box deltas are organized in the following order
+     *      [dx, dy, dw, dh], where dx and dy are the relative correction factors
+     *      for the center position of the bounding box with respect to the width
+     *      and height, and dw and dh are the log-scale relative correction factors
+     *      for the width and height. For input0 of type
+     *      {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
+     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+     *      supported for this tensor.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_rois], specifying the batch index of each box. Boxes with
+     *      the same batch index are grouped together. Zero num_rois is
+     *      supported for this tensor.
+     * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
+     *      each image in the batch, each line with format
+     *      [image_height, image_width].
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0, with shape
+     *      [num_rois, num_classes * 4], specifying the coordinates of each
+     *      output bounding box for each class, with format [x1, y1, x2, y2].
+     *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+     *      scale must be 0.125 and the zero point must be 0.
+     */
+    AXIS_ALIGNED_BBOX_TRANSFORM = @1.2::OperationType:AXIS_ALIGNED_BBOX_TRANSFORM,
+
+    /**
+     * Performs a forward LSTM on the input followed by a backward LSTM.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: 3, either time-major or batch-major.
+     *
+     * All input and output tensors must be of the same type.
+     *
+     * Inputs:
+     * * 0: The input.
+     *      A 3-D tensor of shape:
+     *        If time-major: [max_time, batch_size, input_size]
+     *        If batch-major: [batch_size, max_time, input_size]
+     *      where "max_time" is the number of timesteps (sequence length),
+     *      "batch_size" corresponds to the batching dimension, and
+     *      "input_size" is the size of the input.
+     * * 1: The forward input-to-input weights. Optional.
+     *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+     *      corresponds to the number of forward cell units.
+     * * 2: The forward input-to-forget weights.
+     *      A 2-D tensor of shape [fw_num_units, input_size].
+     * * 3: The forward input-to-cell weights.
+     *      A 2-D tensor of shape [fw_num_units, input_size].
+     * * 4: The forward input-to-output weights.
+     *      A 2-D tensor of shape [fw_num_units, input_size].
+     * * 5: The forward recurrent-to-input weights. Optional.
+     *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+     *      corresponds to either the number of cell units (i.e., fw_num_units),
+     *      or the second dimension of the “fw_projection_weights”, if defined.
+     * * 6: The forward recurrent-to-forget weights.
+     *      A 2-D tensor of shape [fw_num_units, fw_output_size].
+     * * 7: The forward recurrent-to-cell weights.
+     *      A 2-D tensor of shape [fw_num_units, fw_output_size].
+     * * 8: The forward recurrent-to-output weights.
+     *      A 2-D tensor of shape [fw_num_units, fw_output_size].
+     * * 9: The forward cell-to-input weights. Optional.
+     *      A 1-D tensor of shape [fw_num_units].
+     * * 10: The forward cell-to-forget weights. Optional.
+     *       A 1-D tensor of shape [fw_num_units].
+     * * 11: The forward cell-to-output weights. Optional.
+     *       A 1-D tensor of shape [fw_num_units].
+     * * 12: The forward input gate bias. Optional.
+     *       A 1-D tensor of shape [fw_num_units].
+     * * 13: The forward forget gate bias.
+     *       A 1-D tensor of shape [fw_num_units].
+     * * 14: The forward cell gate bias.
+     *       A 1-D tensor of shape [fw_num_units].
+     * * 15: The forward output gate bias.
+     *       A 1-D tensor of shape [fw_num_units].
+     * * 16: The forward projection weights. Optional.
+     *       A 2-D tensor of shape [fw_output_size, fw_num_units].
+     * * 17: The forward projection bias. Optional.
+     *       A 1-D tensor of shape [fw_output_size].
+     * * 18: The backward input-to-input weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+     *       corresponds to the number of backward cell units.
+     * * 19: The backward input-to-forget weights.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 20: The backward input-to-cell weights.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 21: The backward input-to-output weights.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 22: The backward recurrent-to-input weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+     *       corresponds to either the number of cell units (i.e., “bw_num_units”),
+     *       or the second dimension of the “bw_projection_weights”, if defined.
+     * * 23: The backward recurrent-to-forget weights.
+     *       A 2-D tensor of shape [bw_num_units, bw_output_size].
+     * * 24: The backward recurrent-to-cell weights.
+     *       A 2-D tensor of shape [bw_num_units, bw_output_size].
+     * * 25: The backward recurrent-to-output weights.
+     *       A 2-D tensor of shape [bw_num_units, bw_output_size].
+     * * 26: The backward cell-to-input weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 27: The backward cell-to-forget weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 28: The backward cell-to-output weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 29: The backward input gate bias. Optional.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 30: The backward forget gate bias.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 31: The backward cell gate bias.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 32: The backward output gate bias.
+     *       A 1-D tensor of shape [bw_num_units].
+     * * 33: The backward projection weights. Optional.
+     *       A 2-D tensor of shape [bw_output_size, bw_num_units].
+     * * 34: The backward projection bias. Optional.
+     *       A 1-D tensor of shape [bw_output_size].
+     * * 35: The forward input activation state.
+     *       A 2-D tensor of shape [batch_size, fw_output_size].
+     * * 36: The forward input cell state.
+     *       A 2-D tensor of shape [batch_size, fw_num_units].
+     * * 37: The backward input activation state.
+     *       A 2-D tensor of shape [batch_size, bw_output_size].
+     * * 38: The backward input cell state.
+     *       A 2-D tensor of shape [batch_size, bw_num_units].
+     * * 39: The auxiliary input. Optional.
+     *       A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
+     *       corresponds to the batching dimension, and “input_size” is the size
+     *       of the input.
+     * * 40: The forward auxiliary input-to-input weights. Optional.
+     *       A 2-D tensor of shape [fw_num_units, input_size].
+     * * 41: The forward auxiliary input-to-forget weights. Optional.
+     *       A 2-D tensor of shape [fw_num_units, input_size].
+     * * 42: The forward auxiliary input-to-cell weights. Optional.
+     *       A 2-D tensor of shape [fw_num_units, input_size].
+     * * 43: The forward auxiliary input-to-output weights. Optional.
+     *       A 2-D tensor of shape [fw_num_units, input_size].
+     * * 44: The backward auxiliary input-to-input weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 45: The backward auxiliary input-to-forget weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 46: The backward auxiliary input-to-cell weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 47: The backward auxiliary input-to-output weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, input_size].
+     * * 48: The activation function.
+     *       A value indicating the activation function:
+     *       <ul>
+     *       <li>0: None;
+     *       <li>1: Relu;
+     *       <li>3: Relu6;
+     *       <li>4: Tanh;
+     *       <li>6: Sigmoid.
+     *       </ul>
+     * * 49: The clipping threshold for the cell state, such
+     *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+     *       then clipping is disabled.
+     *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+     *       this scalar must be of the type {@link OperandType::FLOAT32},
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
+     * * 50: The clipping threshold for the output from the
+     *       projection layer, such that values are bound within
+     *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+     *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+     *       this scalar must be of the type {@link OperandType::FLOAT32},
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
+     * * 51: merge_outputs
+     *       An {@link OperandType::BOOL} scalar specifying if the outputs
+     *       from forward and backward cells should be merged.
+     * * 52: time_major
+     *       An {@link OperandType::BOOL} scalar specifying the shape format
+     *       of input and output tensors.
+     * * 53: The forward input layer normalization weights. Optional.
+     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+     *       to activation at input gate.
+     * * 54: The forward forget layer normalization weights. Optional.
+     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+     *       to activation at forget gate.
+     * * 55: The forward cell layer normalization weights. Optional.
+     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+     *       to activation at cell gate.
+     * * 56: The forward output layer normalization weights. Optional.
+     *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+     *       to activation at output gate.
+     * * 57: The backward input layer normalization weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+     *       to activation at input gate.
+     * * 58: The backward forget layer normalization weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+     *       to activation at forget gate.
+     * * 59: The backward cell layer normalization weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+     *       to activation at cell gate.
+     * * 60: The backward output layer normalization weights. Optional.
+     *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+     *       to activation at output gate.
+     *
+     * Outputs:
+     * * 0: The forward output.
+     *      A 3-D tensor of shape:
+     *        If time-major and not merge_outputs:
+     *          [max_time, batch_size, fw_output_size]
+     *        If time-major and merge_outputs:
+     *          [max_time, batch_size, fw_output_size + bw_output_size]
+     *        If batch-major and not merge_outputs:
+     *          [batch_size, max_time, fw_output_size]
+     *        If batch-major and merge_outputs:
+     *          [batch_size, max_time, fw_output_size + bw_output_size]
+     * * 1: The backward output.  Unused if merge_outputs is true.
+     *      A 3-D tensor of shape:
+     *        If time-major: [max_time, batch_size, bw_output_size]
+     *        If batch-major: [batch_size, max_time, bw_output_size]
+     */
+    BIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_LSTM,
+
+    /**
+     * A recurrent neural network layer that applies a basic RNN cell to a
+     * sequence of inputs in forward and backward directions.
+     *
+     * This Op unrolls the input along the sequence dimension, and implements
+     * the following operation for each element in the sequence s =
+     * 1...sequence_length:
+     *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
+     *          fw_state * fw_recurrent_weights’ + fw_bias)
+     *
+     * And for each element in the sequence t = sequence_length...1:
+     *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
+     *          bw_state * bw_recurrent_weights’ + bw_bias)
+     *
+     * Where:
+     * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
+     * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
+     *    current “state” which itself is the output from the previous time step
+     *    computation;
+     * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
+     *    batch);
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+     *
+     * The op also supports an auxiliary input. A regular cell feeds one input
+     * into the two RNN cells in the following way:
+     *
+     *       INPUT  (INPUT_REVERSED)
+     *         |         |
+     *    ---------------------
+     *    | FW_RNN     BW_RNN |
+     *    ---------------------
+     *         |         |
+     *      FW_OUT     BW_OUT
+     *
+     * An op with an auxiliary input takes two inputs and feeds them into the
+     * RNN cells in the following way:
+     *
+     *       AUX_INPUT   (AUX_INPUT_REVERSED)
+     *           |             |
+     *     INPUT | (INPUT_R'D.)|
+     *       |   |       |     |
+     *    -----------------------
+     *    |  \  /        \    / |
+     *    | FW_RNN       BW_RNN |
+     *    -----------------------
+     *         |           |
+     *      FW_OUT      BW_OUT
+     *
+     * When this op is stacked on top of itself, this arrangement allows both
+     * the forward and backward outputs from the previous cell to be connected
+     * to the inputs of the next cell.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * The input tensors must all be the same type.
+     *
+     * Inputs:
+     * * 0: input.
+     *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
+     *      it is set to true, then the input has a shape [maxTime, batchSize,
+     *      inputSize], otherwise the input has a shape [batchSize, maxTime,
+     *      inputSize].
+     * * 1: fwWeights.
+     *      A 2-D tensor of shape [fwNumUnits, inputSize].
+     * * 2: fwRecurrentWeights.
+     *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
+     * * 3: fwBias.
+     *      A 1-D tensor of shape [fwNumUnits].
+     * * 4: fwHiddenState.
+     *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
+     *      state input for the first time step of the computation.
+     * * 5: bwWeights.
+     *      A 2-D tensor of shape [bwNumUnits, inputSize].
+     * * 6: bwRecurrentWeights.
+     *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
+     * * 7: bwBias.
+     *      A 1-D tensor of shape [bwNumUnits].
+     * * 8: bwHiddenState.
+     *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
+     *      state input for the first time step of the computation.
+     * * 9: auxInput.
+     *      A 3-D tensor. The shape is the same as that of the input 0.
+     * * 10: fwAuxWeights.
+     *      A 2-D tensor of shape [fwNumUnits, inputSize].
+     * * 11: bwAuxWeights.
+     *      A 2-D tensor of shape [bwNumUnits, inputSize].
+     * * 12: fusedActivationFunction.
+     *      A {@link FusedActivationFunc} value indicating the activation function. If
+     *      “NONE” is specified then it results in a linear activation.
+     * * 13: timeMajor.
+     *      An {@link OperandType::BOOL} scalar specifying the shape format
+     *      of input and output tensors.
+     * * 14: mergeOutputs.
+     *      An {@link OperandType::BOOL} scalar specifying if the outputs
+     *      from forward and backward cells are separate (if set to false) or
+     *      concatenated (if set to true).
+     *
+     * Outputs:
+     * * 0: fwOutput.
+     *      A 3-D tensor. The first two dimensions of the shape are defined by
+     *      the input 13 (timeMajor) and the third dimension is defined by the
+     *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
+     *      two dimensions are [maxTime, batchSize], otherwise they are set to
+     *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
+     *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
+     *      to fwNumUnits.
+     * * 1: bwOutput.
+     *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
+     *      this tensor is not produced. The shape is defined by the input 13
+     *      (timeMajor). If it is set to true, then the shape is set to
+     *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
+     *      [batchSize, maxTime, bwNumUnits].
+     */
+    BIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_RNN,
+
+    /**
+     * Greedily selects a subset of bounding boxes in descending order of score.
+     *
+     * This op applies the NMS algorithm to each class. In each loop of
+     * execution, the box with the maximum score is selected and removed from
+     * the pending set. The scores of the remaining boxes are lowered according
+     * to the intersection-over-union (IOU) overlap with the previously selected
+     * boxes and the specified NMS kernel method. Any boxes with a score lower
+     * than the threshold are removed from the pending set.
+     *
+     * Three NMS kernels are supported:
+     * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
+     * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+     * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
+     *
+     * Axis-aligned bounding boxes are represented by their upper-left corner
+     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+     * bounding box should satisfy x1 <= x2 and y1 <= y2.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Inputs:
+     * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
+     *      of each bounding box proposal. The boxes are grouped by batches in the
+     *      first dimension. Zero num_rois is supported for this tensor.
+     * * 1: A 2-D Tensor specifying the bounding boxes of shape
+     *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
+     *      The boxes are grouped by batches in the first dimension. The sequential
+     *      order of the boxes corresponds with input0. For input0 of type
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+     *      {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
+     *      scale of 0.125. Zero num_rois is supported for this tensor.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_rois], specifying the batch index of each box. Boxes with
+     *      the same batch index are grouped together.
+     * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
+     *      with scores lower than the threshold are filtered before sending
+     *      to the NMS algorithm.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
+     *      number of selected bounding boxes for each image. Set to a negative
+     *      value for an unlimited number of output bounding boxes.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
+     *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
+     * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+     *      threshold in hard and linear NMS kernel. This field is ignored if
+     *      gaussian kernel is selected.
+     * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
+     *      gaussian NMS kernel. This field is ignored if gaussian kernel is
+     *      not selected.
+     * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
+     *      Boxes with scores lower than the threshold are dropped during the
+     *      score updating phase in soft NMS.
+     *
+     * Outputs:
+     * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
+     *      [num_output_rois], specifying the score of each output box. The boxes
+     *      are grouped by batches, but the sequential order in each batch is not
+     *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the scale and zero point must be the same as input0.
+     * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
+     *      [num_output_rois, 4], specifying the coordinates of each
+     *      output bounding box with the same format as input1. The sequential
+     *      order of the boxes corresponds with output0. For type of
+     *      {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
+     *      0.125 and the zero point must be 0.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_output_rois], specifying the class of each output box. The
+     *      sequential order of the boxes corresponds with output0.
+     * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_output_rois], specifying the batch index of each box. Boxes
+     *      with the same batch index are grouped together.
+     */
+    BOX_WITH_NMS_LIMIT = @1.2::OperationType:BOX_WITH_NMS_LIMIT,
+
+    /**
+     * Casts a tensor to a new type.
+     *
+     * This operation ignores the scale and zeroPoint of quantized tensors,
+     * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
+     * as a tensor of uint8 values.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: A tensor with the same shape as input0.
+     */
+    CAST = @1.2::OperationType:CAST,
+
+    /**
+     * Shuffles the channels of the input tensor.
+     *
+     * Given an input tensor and an integer value num_groups, CHANNEL_SHUFFLE
+     * divides the channel dimension into num_groups groups and reorganizes the
+     * channels by grouping the channels with the same index in each group.
+     *
+     * Along the channel dimension, the output is calculated using this formula:
+     *
+     *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
+     *
+     * where group_size = num_channels / num_groups
+     *
+     * The number of channels must be divisible by num_groups.
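+     *
+     * For example, with num_channels = 6 and num_groups = 2 (channel
+     * indices shown for illustration):
+     *
+     *     input channels:  [0, 1, 2, 3, 4, 5]
+     *     output channels: [0, 3, 1, 4, 2, 5]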
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be shuffled.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the number of
+     *      groups.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the dimension
+     *      channel shuffle would be performed on. Negative index is used to
+     *      specify axis from the end (e.g. -1 for the last axis). Must be in
+     *      the range [-n, n).
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    CHANNEL_SHUFFLE = @1.2::OperationType:CHANNEL_SHUFFLE,
+
+    /**
+     * Applies postprocessing steps to bounding box detections.
+     *
+     * Bounding box detections are generated by applying a transformation to a
+     * set of predefined anchors with the bounding box deltas from bounding box
+     * regression. A final step of hard NMS is applied to limit the number of
+     * returned boxes.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Inputs:
+     * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
+     *      the score of each anchor with each class. Class 0 for each
+     *      [batches, num_anchors, 0] is background and will be ignored.
+     * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
+     *      the first four values in length_box_encoding specifying the bounding
+     *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
+     *      where dy and dx are the linear-scale relative correction factors for
+     *      the center position of the bounding box with respect to the width and
+     *      height, and dh and dw are the log-scale relative correction factors
+     *      for the width and height. All the entries in length_box_encoding
+     *      beyond the first four values are ignored in this operation.
+     * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+     *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
+     *      ctr_x are the center position of the box, and h and w are the height
+     *      and the width.
+     * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+     *      factor for dy in bounding box deltas.
+     * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+     *      factor for dx in bounding box deltas.
+     * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+     *      factor for dh in bounding box deltas.
+     * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+     *      factor for dw in bounding box deltas.
+     * * 7: An {@link OperandType::BOOL} scalar, set to true to use the regular
+     *      multi-class NMS algorithm that does NMS separately for each class,
+     *      set to false for a faster algorithm that only does a single NMS
+     *      using the highest class score.
+     * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying
+     *      the maximum number of boxes for the output. Boxes with the lowest
+     *      scores are discarded to meet the limit.
+     * * 9: An {@link OperandType::INT32} scalar, only used when input7 is
+     *      set to false, specifying the maximum number of classes per detection.
+     * * 10: An {@link OperandType::INT32} scalar, only used when input7 is
+     *       set to true, specifying the maximum number of detections when
+     *       applying NMS algorithm for each single class.
+     * * 11: A scalar, score_threshold. Boxes with scores lower than the
+     *       threshold are filtered before sending to the NMS algorithm. The
+     *       scalar must be of {@link OperandType::FLOAT16} if input0 is of
+     *       {@link OperandType::TENSOR_FLOAT16} and of
+     *       {@link OperandType::FLOAT32} if input0 is of
+     *       {@link OperandType::TENSOR_FLOAT32}.
+     * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
+     *       must be of {@link OperandType::FLOAT16} if input0 is of
+     *       {@link OperandType::TENSOR_FLOAT16} and of
+     *       {@link OperandType::FLOAT32} if input0 is of
+     *       {@link OperandType::TENSOR_FLOAT32}.
+     * * 13: An {@link OperandType::BOOL} scalar, set to true to include
+     *       background class in the list of label map for the output, set
+     *       to false to not include the background. When the background
+     *       class is included, it has label 0 and the output classes start
+     *       at 1 in the label map, otherwise, the output classes start at 0.
+     *
+     * Outputs:
+     * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape
+     *      [batches, max_num_detections], specifying the score of each output
+     *      detection.
+     * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
+     *      coordinates of each output bounding box, with format
+     *      [y1, x1, y2, x2].
+     * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [batches, max_num_detections], specifying the class label for each
+     *      output detection.
+     * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
+     *      specifying the number of valid output detections for each batch.
+     */
+    DETECTION_POSTPROCESSING = @1.2::OperationType:DETECTION_POSTPROCESSING,
+
+    /**
+     * For input tensors x and y, computes x == y elementwise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+     *      with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    EQUAL = @1.2::OperationType:EQUAL,
+
+    /**
+     * Computes exponential of x element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    EXP = @1.2::OperationType:EXP,
+
+    /**
+     * Inserts a dimension of 1 into a tensor's shape.
+     *
+     * Given a tensor input, this operation inserts a dimension of 1 at the
+     * given dimension index of input's shape. The dimension index starts at
+     * zero; if you specify a negative dimension index, it is counted backward
+     * from the end.
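+     *
+     * For example, expanding a tensor of shape {2, 3} (illustrative shape):
+     *
+     *     dimension index 1:  output shape {2, 1, 3}
+     *     dimension index -1: output shape {2, 3, 1}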
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: An {@link OperandType::INT32} scalar specifying the dimension
+     *      index to expand. Must be in the range [-(n + 1), (n + 1)).
+     *
+     * Outputs:
+     * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
+     *      input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    EXPAND_DIMS = @1.2::OperationType:EXPAND_DIMS,
+
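A minimal sketch of the shape rule described above (editor-provided, not HAL code; `expandDims` is a hypothetical helper). Only the shape metadata changes; the underlying data is untouched:

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

// Inserts a dimension of size 1 at `axis`; negative axes count back
// from the end, so the valid range is [-(n + 1), n + 1).
std::vector<uint32_t> expandDims(const std::vector<uint32_t>& shape,
                                 int32_t axis) {
    const int32_t n = static_cast<int32_t>(shape.size());
    if (axis < 0) axis += n + 1;  // e.g. -1 maps to n (append at the end)
    if (axis < 0 || axis > n) throw std::invalid_argument("axis out of range");
    std::vector<uint32_t> out(shape);
    out.insert(out.begin() + axis, 1);
    return out;
}

// expandDims({2, 3}, 1)  -> {2, 1, 3}
// expandDims({2, 3}, -1) -> {2, 3, 1}
```
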
+    /**
+     * Gathers values along an axis.
+     *
+     * Produces an output tensor with shape
+     *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
+     * where:
+     *     # Vector indices (output is rank(input0)).
+     *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
+     *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+     *
+     *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
+     *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+     *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor from which to gather values.
+     * * 1: An {@link OperandType::INT32} scalar specifying the axis.
+     *      Negative index is used to specify axis from the end
+     *      (e.g. -1 for the last axis). Must be in the range [-n, n).
+     * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
+     *      The values must be in the bounds of the corresponding dimensions
+     *      of input0.
+     *
+     * Outputs:
+     * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    GATHER = @1.2::OperationType:GATHER,
+
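The vector-indices case above can be sketched by viewing a dense row-major tensor as [outer, axisSize, inner] (an editor-provided illustration; `gatherAlongAxis` is a hypothetical name, and the higher-rank-indices case only changes how the output is shaped, not the copies):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Gathers 1-D indices along `axis` of a dense row-major float tensor.
std::vector<float> gatherAlongAxis(const std::vector<float>& input,
                                   const std::vector<uint32_t>& shape,
                                   int32_t axis,
                                   const std::vector<int32_t>& indices) {
    if (axis < 0) axis += static_cast<int32_t>(shape.size());
    size_t outer = 1, inner = 1;
    for (int32_t i = 0; i < axis; ++i) outer *= shape[i];
    for (size_t i = static_cast<size_t>(axis) + 1; i < shape.size(); ++i)
        inner *= shape[i];
    const size_t axisSize = shape[axis];
    std::vector<float> out(outer * indices.size() * inner);
    for (size_t o = 0; o < outer; ++o)
        for (size_t i = 0; i < indices.size(); ++i)
            for (size_t k = 0; k < inner; ++k)
                out[(o * indices.size() + i) * inner + k] =
                        input[(o * axisSize + indices[i]) * inner + k];
    return out;
}
```
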
+    /**
+     * Generates axis-aligned bounding box proposals.
+     *
+     * Bounding box proposals are generated by applying transformation on a set
+     * of predefined anchors with the bounding box deltas from bounding box
+     * regression. A final step of hard NMS is applied to limit the number of
+     * returned boxes.
+     *
+     * An axis-aligned bounding box is represented by its upper-left corner
+     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+     * bounding box should satisfy x1 <= x2 and y1 <= y2.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Inputs:
+     * * 0: A 4-D Tensor specifying the score of each anchor at each
+     *      location. With "NHWC" data layout, the tensor shape is
+     *      [batches, height, width, num_anchors]. With "NCHW" data layout,
+     *      the tensor shape is [batches, num_anchors, height, width].
+     * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
+     *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
+     *      With "NCHW" data layout, the tensor shape is
+     *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
+     *      in the order of [dx, dy, dw, dh], where dx and dy are the
+     *      linear-scale relative correction factors for the center position of
+     *      the bounding box with respect to the width and height, and dw and dh
+     *      are the log-scale relative correction factors for the width and
+     *      height. The last dimension is the channel dimension.
+     * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+     *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+     *      {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
+     * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
+     *      each image in the batch, with format [image_height, image_width].
+     *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, this
+     *      tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
+     *      scale of 0.125.
+     * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+     *      from the height of original image to the height of feature map.
+     * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+     *      from the width of original image to the width of feature map.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the maximum
+     *      number of boxes before going into the hard NMS algorithm. Boxes
+     *      with the lowest scores are discarded to meet the limit. Set to
+     *      a non-positive value for unlimited number.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the maximum
+     *      number of boxes returning from the hard NMS algorithm. Boxes
+     *      with the lowest scores are discarded to meet the limit. Set to
+     *      a non-positive value for unlimited number.
+     * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+     *      threshold for hard NMS.
+     * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with
+     *      height or width lower than the absolute threshold are filtered out.
+     * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+     *       NCHW data layout for input0 and input1. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0, of shape
+     *      [num_output_rois], specifying the score of each output box.
+     *      The boxes are grouped by batches, but the sequential order in
+     *      each batch is not guaranteed. For type of
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the scale and zero
+     *      point must be the same as input0.
+     * * 1: A tensor of the same {@link OperandType} as input3, of shape
+     *      [num_output_rois, 4], specifying the coordinates of each output
+     *      bounding box for each class, with format [x1, y1, x2, y2].
+     *      The sequential order of the boxes corresponds with output0.
+     *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+     *      scale must be 0.125 and the zero point must be 0.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_output_rois], specifying the batch index of each box. Boxes
+     *      with the same batch index are grouped together.
+     */
+    GENERATE_PROPOSALS = @1.2::OperationType:GENERATE_PROPOSALS,
+
+    /**
+     * For input tensors x and y, computes x > y elementwise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+     *      with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    GREATER = @1.2::OperationType:GREATER,
+
+    /**
+     * For input tensors x and y, computes x >= y elementwise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+     *      with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    GREATER_EQUAL = @1.2::OperationType:GREATER_EQUAL,
+
+    /**
+     * Performs a grouped 2-D convolution operation.
+     *
+     * Given an input tensor of shape [batches, height, width, depth_in] and a
+     * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
+     * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
+     * applies a group of different filters to each input channel group, then
+     * concatenates the results together.
+     *
+     * Specifically, the input channels are divided into num_groups groups, each with
+     * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
+     * filters are also divided into num_groups groups, i.e. depth_out is divisible
+     * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
+     * input channel group, and the results are concatenated together.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, i, j, g * channel_multiplier + q] =
+     *         sum_{di, dj, dk} (
+     *             input[b, strides[1] * i + di, strides[2] * j + dj,
+     *                   g * depth_group + dk] *
+     *             filter[g * channel_multiplier + q, di, dj, dk]
+     *         ) + bias[g * channel_multiplier + q]
+     *
+     * where channel_multiplier = depth_out / num_groups
+     *
+     * Supported tensor {@link OperandType} configurations:
+     * * 16 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+     *
+     * * 32 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+     *
+     * * Quantized:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+     * * * input.scale * filter.scale).
+     *
+     * * Quantized with symmetric per channel quantization for the filter:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+     * * * each value's scale is separate and equal to input.scale * filter.scales[channel]).
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input, where depth_in = num_groups * depth_group.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_group], specifying
+     *      the filter, where depth_out must be divisible by num_groups.  For
+     *      tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+     *      the channel dimension (channelDim at
+     *      {@link SymmPerChannelQuantParams}) must be set to 0.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
+     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+     *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+     *      bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 9: An {@link OperandType::INT32} scalar, specifying the number of
+     *      groups.
+     * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+     *       {@link FusedActivationFunc} values. Specifies the activation to
+     *       invoke on the result.
+     * * 11: An {@link OperandType::BOOL} scalar, set to true to specify
+     *       NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input, where depth_in = num_groups * depth_group.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_group], specifying
+     *      the filter, where depth_out must be divisible by num_groups.  For
+     *      tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+     *      the channel dimension (SymmPerChannelQuantParams::channelDim)
+     *      must be set to 0.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
+     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+     *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+     *      bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the number of
+     *      groups.
+     * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth_out].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    GROUPED_CONV_2D = @1.2::OperationType:GROUPED_CONV_2D,
+
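As a reference for the summation formula above, the following editor-provided sketch (not driver code; `groupedConv2dValid` is a hypothetical host-side helper) implements the NHWC float case restricted to stride 1, VALID (zero) padding, and no fused activation:

```cpp
#include <cstdint>
#include <vector>

// Naive grouped convolution. Input is [batches, h, w, depthIn], filter
// is [depthOut, fh, fw, depthGroup], bias is [depthOut], with
// depthIn == numGroups * depthGroup.
std::vector<float> groupedConv2dValid(
        const std::vector<float>& in, uint32_t batches, uint32_t h,
        uint32_t w, uint32_t depthIn, const std::vector<float>& filter,
        uint32_t depthOut, uint32_t fh, uint32_t fw,
        const std::vector<float>& bias, uint32_t numGroups) {
    const uint32_t depthGroup = depthIn / numGroups;
    const uint32_t channelMultiplier = depthOut / numGroups;
    const uint32_t outH = h - fh + 1, outW = w - fw + 1;
    std::vector<float> out(batches * outH * outW * depthOut);
    for (uint32_t b = 0; b < batches; ++b)
      for (uint32_t oy = 0; oy < outH; ++oy)
        for (uint32_t ox = 0; ox < outW; ++ox)
          for (uint32_t oc = 0; oc < depthOut; ++oc) {
            const uint32_t g = oc / channelMultiplier;  // group index
            float acc = bias[oc];
            for (uint32_t dy = 0; dy < fh; ++dy)
              for (uint32_t dx = 0; dx < fw; ++dx)
                for (uint32_t dc = 0; dc < depthGroup; ++dc)
                  acc += in[((b * h + oy + dy) * w + ox + dx) * depthIn +
                            g * depthGroup + dc] *
                         filter[((oc * fh + dy) * fw + dx) * depthGroup + dc];
            out[((b * outH + oy) * outW + ox) * depthOut + oc] = acc;
          }
    return out;
}
```
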
+    /**
+     * Localizes the maximum keypoints from heatmaps.
+     *
+     * This operation approximates the accurate maximum keypoint scores and
+     * indices after bicubic upscaling by using Taylor expansion up to the
+     * quadratic term.
+     *
+     * The bounding box is represented by its upper-left corner coordinate
+     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+     * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Inputs:
+     * * 0: A 4-D Tensor of shape
+     *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
+     *      specifying the heatmaps, the height and width of heatmaps should
+     *      be the same, and must be greater than or equal to 2.
+     * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
+     *      each with format [x1, y1, x2, y2]. For input0 of type
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
+     *      be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
+     *      of 0 and scale of 0.125.
+     * * 2: An {@link OperandType::BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0, with shape
+     *      [num_boxes, num_keypoints], specifying score of the keypoints.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
+     * * 1: A tensor of the same {@link OperandType} as input1, with shape
+     *      [num_boxes, num_keypoints, 2], specifying the location of
+     *      the keypoints, the second dimension is organized as
+     *      [keypoint_x, keypoint_y].
+     *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+     *      scale must be 0.125 and the zero point must be 0.
+     */
+    HEATMAP_MAX_KEYPOINT = @1.2::OperationType:HEATMAP_MAX_KEYPOINT,
+
+    /**
+     * Applies instance normalization to the input tensor.
+     *
+     * The values in the output tensor are computed as:
+     *
+     *     output[b, h, w, c] =
+     *         (input[b, h, w, c] - mean[b, c]) * gamma /
+     *         sqrt(var[b, c] + epsilon) + beta
+     *
+     * Where the mean and variance are computed across the spatial dimensions:
+     *
+     *     mean[b, c] =
+     *         sum_{h, w}(input[b, h, w, c]) / sum(1)
+     *
+     *     var[b, c] =
+     *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be normalized.
+     * * 1: A scalar, specifying gamma, the scale applied to the normalized
+     *      tensor. The scalar must be of {@link OperandType::FLOAT16} if
+     *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} if input0 is of
+     *      {@link OperandType::TENSOR_FLOAT32}.
+     * * 2: A scalar, specifying beta, the offset applied to the normalized
+     *      tensor. The scalar must be of {@link OperandType::FLOAT16} if
+     *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} if input0 is of
+     *      {@link OperandType::TENSOR_FLOAT32}.
+     * * 3: A scalar, specifying epsilon, the small value added to variance to
+     *      avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
+     *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} if input0 is of
+     *      {@link OperandType::TENSOR_FLOAT32}.
+     * * 4: An {@link OperandType::BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+     */
+    INSTANCE_NORMALIZATION = @1.2::OperationType:INSTANCE_NORMALIZATION,
+
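The per-(batch, channel) statistics in the formula above can be sketched as follows (editor-provided, NHWC float case only; `instanceNorm` is a hypothetical helper that normalizes in place):

```cpp
#include <cmath>
#include <cstdint>
#include <vector>

// Mean and variance are computed per (batch, channel) across H and W,
// then every element is normalized, scaled by gamma, and shifted by beta.
void instanceNorm(std::vector<float>& t, uint32_t batches, uint32_t h,
                  uint32_t w, uint32_t channels, float gamma, float beta,
                  float epsilon) {
    auto at = [&](uint32_t b, uint32_t y, uint32_t x, uint32_t c) -> float& {
        return t[((b * h + y) * w + x) * channels + c];
    };
    const float count = static_cast<float>(h * w);  // sum(1) in the formula
    for (uint32_t b = 0; b < batches; ++b)
        for (uint32_t c = 0; c < channels; ++c) {
            float mean = 0.0f, var = 0.0f;
            for (uint32_t y = 0; y < h; ++y)
                for (uint32_t x = 0; x < w; ++x) mean += at(b, y, x, c);
            mean /= count;
            for (uint32_t y = 0; y < h; ++y)
                for (uint32_t x = 0; x < w; ++x) {
                    const float d = at(b, y, x, c) - mean;
                    var += d * d;
                }
            var /= count;
            for (uint32_t y = 0; y < h; ++y)
                for (uint32_t x = 0; x < w; ++x)
                    at(b, y, x, c) = (at(b, y, x, c) - mean) * gamma /
                                             std::sqrt(var + epsilon) + beta;
        }
}
```
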
+    /**
+     * For input tensors x and y, computes x < y elementwise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+     *      with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    LESS = @1.2::OperationType:LESS,
+
+    /**
+     * For input tensors x and y, computes x <= y elementwise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+     *      with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    LESS_EQUAL = @1.2::OperationType:LESS_EQUAL,
+
+    /**
+     * Computes natural logarithm of x element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    LOG = @1.2::OperationType:LOG,
+
+    /**
+     * Returns the truth value of x AND y element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+     *      compatible with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    LOGICAL_AND = @1.2::OperationType:LOGICAL_AND,
+
+    /**
+     * Computes the truth value of NOT x element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    LOGICAL_NOT = @1.2::OperationType:LOGICAL_NOT,
+
+    /**
+     * Returns the truth value of x OR y element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+     *      compatible with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    LOGICAL_OR = @1.2::OperationType:LOGICAL_OR,
+
+    /**
+     * Computes the log softmax activations given logits.
+     *
+     * The output is calculated using this formula:
+     *
+     *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor specifying the input logits.
+     * * 1: A scalar, specifying the positive scaling factor for the exponent,
+     *      beta.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+     *      value must be of {@link OperandType::FLOAT16}.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+     *      value must be of {@link OperandType::FLOAT32}.
+     * * 2: An {@link OperandType::INT32} scalar specifying the axis to
+     *      reduce across. Negative index is used to specify axis from the
+     *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
+     *
+     * Outputs:
+     * * 0: The output tensor of the same {@link OperandType} and shape as
+     *      input0.
+     */
+    LOG_SOFTMAX = @1.2::OperationType:LOG_SOFTMAX,
+
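A minimal sketch of the formula above for a single 1-D slice along the reduction axis (editor-provided; `logSoftmax1d` is a hypothetical name). Subtracting the maximum first is a standard numerical stabilization and leaves the result unchanged:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
std::vector<float> logSoftmax1d(const std::vector<float>& logits, float beta) {
    float maxv = logits[0] * beta;
    for (float v : logits) maxv = std::max(maxv, v * beta);
    float sum = 0.0f;
    for (float v : logits) sum += std::exp(v * beta - maxv);
    const float logSum = maxv + std::log(sum);
    std::vector<float> out(logits.size());
    for (size_t i = 0; i < logits.size(); ++i)
        out[i] = logits[i] * beta - logSum;
    return out;
}
```
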
+    /**
+     * Returns the element-wise maximum of two tensors.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+     *      with input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    MAXIMUM = @1.2::OperationType:MAXIMUM,
+
+    /**
+     * Returns the element-wise minimum of two tensors.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+     *      with input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    MINIMUM = @1.2::OperationType:MINIMUM,
+
+    /**
+     * Computes numerical negative value element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    NEG = @1.2::OperationType:NEG,
+
+    /**
+     * For input tensors x and y, computes x != y elementwise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * This operation supports broadcasting.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+     *      with input0.
+     *
+     * Outputs:
+     * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+     */
+    NOT_EQUAL = @1.2::OperationType:NOT_EQUAL,
+
+    /**
+     * Pads a tensor with the given constant value according to the specified
+     * paddings.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be padded.
+     * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+     *      for each spatial dimension of the input tensor. The shape of the
+     *      tensor must be {rank(input0), 2}.
+     *      padding[i, 0] specifies the number of elements to be padded in the
+     *      front of dimension i.
+     *      padding[i, 1] specifies the number of elements to be padded after
+     *      the end of dimension i.
+     * * 2: A scalar specifying the value to use for padding input0.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+     *      pad value must be of {@link OperandType::FLOAT16}.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+     *      pad value must be of {@link OperandType::FLOAT32}.
+     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the pad value must be of {@link OperandType::INT32}. The
+     *      scale and zeroPoint are assumed to be the same as in input0.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0. The
+     *      output tensor has the same rank as input0, and each
+     *      dimension of the output tensor has the same size as the
+     *      corresponding dimension of the input tensor plus the size
+     *      of the padding:
+     *          output0.dimension[i] =
+     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    PAD_V2 = @1.2::OperationType:PAD_V2,
+
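The output-shape rule above, `output0.dimension[i] = padding[i, 0] + input0.dimension[i] + padding[i, 1]`, is shown below for the 1-D float case (editor-provided sketch; `padV2_1d` is a hypothetical helper):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Pads a 1-D float tensor with `before` copies of padValue in front and
// `after` copies at the end.
std::vector<float> padV2_1d(const std::vector<float>& in, uint32_t before,
                            uint32_t after, float padValue) {
    std::vector<float> out(before + in.size() + after, padValue);
    for (size_t i = 0; i < in.size(); ++i) out[before + i] = in[i];
    return out;
}

// padV2_1d({1, 2, 3}, 2, 1, 0.0f) -> {0, 0, 1, 2, 3, 0}
```
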
+    /**
+     * Computes the power of one value to another.
+     *
+     * Given a tensor base and a tensor exponent, this operation computes
+     * base^exponent elementwise.
+     *
+     * This operation supports broadcasting. The size of the output is the
+     * maximum size along each dimension of the input operands. It starts with
+     * the trailing dimensions, and works its way forward.
+     *
+     * For example:
+     *     base.dimension     =    {4, 1, 2}
+     *     exponent.dimension = {5, 4, 3, 1}
+     *     output.dimension   = {5, 4, 3, 2}
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor specifying the base.
+     * * 1: A tensor specifying the exponent.
+     *
+     * Outputs:
+     * * 0: An output tensor.
+     */
+    POW = @1.2::OperationType:POW,
+
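The trailing-dimension broadcast rule above (shared by the other broadcasting operations in this enum) can be sketched as a shape computation (editor-provided; `broadcastShape` is a hypothetical helper):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Aligns shapes at the trailing dimensions; each output dimension is
// the larger of the two, and a mismatch is only legal when one side is 1.
std::vector<uint32_t> broadcastShape(std::vector<uint32_t> a,
                                     std::vector<uint32_t> b) {
    if (a.size() < b.size()) std::swap(a, b);
    b.insert(b.begin(), a.size() - b.size(), 1);  // left-pad with 1s
    std::vector<uint32_t> out(a.size());
    for (size_t i = 0; i < a.size(); ++i) {
        if (a[i] != b[i] && a[i] != 1 && b[i] != 1)
            throw std::invalid_argument("shapes are not broadcastable");
        out[i] = std::max(a[i], b[i]);
    }
    return out;
}

// broadcastShape({4, 1, 2}, {5, 4, 3, 1}) -> {5, 4, 3, 2}, as in the
// example above.
```
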
+    /**
+     * Parametric Rectified Linear Unit.
+     *
+     * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
+     * is a learned array with the same {@link OperandType} and compatible
+     * dimensions as input x.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its way
+     * forward.
+     *
+     * Example:
+     *     input.dimension  =    {4, 1, 2}
+     *     alpha.dimension  = {5, 4, 3, 1}
+     *     output.dimension = {5, 4, 3, 2}
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+     *      as input0, specifying the alpha.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+     */
+    PRELU = @1.2::OperationType:PRELU,
+
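The activation itself is straightforward; below is an editor-provided float sketch for same-shaped input and alpha tensors (broadcasting of alpha omitted; `prelu` is a hypothetical helper):

```cpp
#include <cstddef>
#include <vector>

// f(x) = x for x >= 0, alpha * x otherwise, evaluated elementwise.
std::vector<float> prelu(const std::vector<float>& x,
                         const std::vector<float>& alpha) {
    std::vector<float> out(x.size());
    for (size_t i = 0; i < x.size(); ++i)
        out[i] = x[i] >= 0.0f ? x[i] : alpha[i] * x[i];
    return out;
}
```
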
+    /**
+     * Quantizes the input tensor.
+     *
+     * The formula is:
+     *
+     *     output = max(0, min(255, round(input / scale) + zeroPoint))
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor, may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0, but with
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}.
+     */
+    QUANTIZE = @1.2::OperationType:QUANTIZE,
+
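A direct transcription of the quantization formula above (editor-provided sketch; `quantize` is a hypothetical host-side helper):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// output = max(0, min(255, round(input / scale) + zeroPoint))
std::vector<uint8_t> quantize(const std::vector<float>& in, float scale,
                              int32_t zeroPoint) {
    std::vector<uint8_t> out(in.size());
    for (size_t i = 0; i < in.size(); ++i) {
        const int32_t q =
                static_cast<int32_t>(std::lround(in[i] / scale)) + zeroPoint;
        out[i] = static_cast<uint8_t>(std::max(0, std::min(255, q)));
    }
    return out;
}
```
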
+    /**
+     * A version of quantized LSTM, using 16 bit quantization for internal
+     * state.
+     *
+     * There is no projection layer, so cell state size is equal to the output
+     * size.
+     *
+     * Inputs:
+     * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [numBatches, inputSize] specifying the input to the LSTM
+     *      cell. Tensor is quantized with a fixed quantization range of
+     *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
+     * * 1: The input-to-input weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-input part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 2: The input-to-forget weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-forget part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 3: The input-to-cell weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-cell part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 4: The input-to-output weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-output part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 5: The recurrent-to-input weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, outputSize] specifying recurrent-to-input part
+     *      of weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 6: The recurrent-to-forget weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, outputSize] specifying recurrent-to-forget
+     *      part of weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 7: The recurrent-to-cell weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
+     *      of weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 8: The recurrent-to-output weights.
+     *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, outputSize] specifying recurrent-to-output
+     *      part of weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 9: The input gate bias.
+     *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+     *      [outputSize] specifying the bias for the fully-connected layer
+     *      inside the LSTM cell. Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 10: The forget gate bias.
+     *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+     *      [outputSize] specifying the bias for the fully-connected layer
+     *      inside the LSTM cell. Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 11: The cell bias.
+     *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+     *      [outputSize] specifying the bias for the fully-connected layer
+     *      inside the LSTM cell. Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 12: The output gate bias.
+     *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+     *      [outputSize] specifying the bias for the fully-connected layer
+     *      inside the LSTM cell. Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+     *       and shape [numBatches, outputSize] specifying the cell state from the
+     *       previous time step of the LSTM cell. It is quantized using a
+     *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
+     *       32768, zeroPoint = 0).
+     * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *       and shape [numBatches, outputSize] specifying the output of the LSTM
+     *       cell from previous time-step. Tensor is quantized with a fixed
+     *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
+     *       128).
+     *
+     * Outputs:
+     * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+     *      and shape [numBatches, outputSize] which contains a cell state from
+     *      the current time step. Tensor is quantized using a quantization
+     *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
+     *      0).
+     * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and shape [numBatches, outputSize] which contains the output value.
+     *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
+     *      (scale = 1/128, zeroPoint = 128).
+     */
+    QUANTIZED_16BIT_LSTM = @1.2::OperationType:QUANTIZED_16BIT_LSTM,
+
+    /**
+     * Draws samples from a multinomial distribution.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Inputs:
+     * * 0: A 2-D tensor with shape [batches, classes], specifying the
+     *      unnormalized log-probabilities for all classes.
+     * * 1: A scalar {@link OperandType::INT32}, specifying the number of
+     *      independent samples to draw for each row slice.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
+     *      specifying seeds used to initialize the random distribution.
+     *
+     * Outputs:
+     * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
+     *      [batches, samples], containing the drawn samples.
+     */
+    RANDOM_MULTINOMIAL = @1.2::OperationType:RANDOM_MULTINOMIAL,
+
+    /**
+     * Reduces a tensor by computing the "logical and" of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     */
+    REDUCE_ALL = @1.2::OperationType:REDUCE_ALL,
+
+    /**
+     * Reduces a tensor by computing the "logical or" of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_BOOL8}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     */
+    REDUCE_ANY = @1.2::OperationType:REDUCE_ANY,
+
+    /**
+     * Reduces a tensor by computing the maximum of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    REDUCE_MAX = @1.2::OperationType:REDUCE_MAX,
+
+    /**
+     * Reduces a tensor by computing the minimum of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    REDUCE_MIN = @1.2::OperationType:REDUCE_MIN,
+
+    /**
+     * Reduces a tensor by multiplying elements along given dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     */
+    REDUCE_PROD = @1.2::OperationType:REDUCE_PROD,
+
+    /**
+     * Reduces a tensor by summing elements along given dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0.
+     */
+    REDUCE_SUM = @1.2::OperationType:REDUCE_SUM,
+
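The REDUCE_* entries share the same reduction pattern; below is an editor-provided sketch of REDUCE_SUM for a rank-2 float tensor reduced along one axis (`reduceSum2d` is a hypothetical helper; the other REDUCE_* ops differ only in the accumulation):

```cpp
#include <cstdint>
#include <vector>

// Sums a [rows, cols] tensor along `axis`. With keep_dims the output
// shape would be {1, cols} or {rows, 1}; without it the axis is dropped.
std::vector<float> reduceSum2d(const std::vector<float>& in, uint32_t rows,
                               uint32_t cols, int32_t axis) {
    if (axis < 0) axis += 2;  // dimensions may be given as negative indices
    const uint32_t outSize = (axis == 0) ? cols : rows;
    std::vector<float> out(outSize, 0.0f);
    for (uint32_t r = 0; r < rows; ++r)
        for (uint32_t c = 0; c < cols; ++c)
            out[(axis == 0) ? c : r] += in[r * cols + c];
    return out;
}
```
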
+    /**
+     * Selects and scales the feature map of each region of interest to a
+     * unified output size by average pooling of sampling points obtained
+     * with bilinear interpolation.
+     *
+     * The region of interest is represented by its upper-left corner coordinate
+     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+     * A spatial scaling factor is applied to map into feature map coordinates.
+     * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+     *
+     * No rounding is applied in this operation. The sampling points are
+     * uniformly distributed in the pooling bin and their values are calculated
+     * by bilinear interpolation.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Inputs:
+     * * 0: A 4-D tensor, specifying the feature map.
+     * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+     *      the regions of interest, each line with format [x1, y1, x2, y2].
+     *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+     *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+     *      supported for this tensor.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_rois], specifying the batch index of each box. Boxes with
+     *      the same batch index are grouped together. Zero num_rois is
+     *      supported for this tensor.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the output
+     *      height of the output tensor.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the output
+     *      width of the output tensor.
+     * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+     *      from the height of original image to the height of feature map.
+     * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+     *      from the width of original image to the width of feature map.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the number of
+     *      sampling points in height dimension used to compute the output.
+     *      Set to 0 for adaptive value of ceil(roi_height/out_height).
+     * * 8: An {@link OperandType::INT32} scalar, specifying the number of
+     *      sampling points in width dimension used to compute the output.
+     *      Set to 0 for adaptive value of ceil(roi_width/out_width).
+     * * 9: An {@link OperandType::BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0. The output
+     *      shape is [num_rois, out_height, out_width, depth].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+     */
+    ROI_ALIGN = @1.2::OperationType:ROI_ALIGN,
+
+    /**
+     * Selects and scales the feature map of each region of interest to a
+     * unified output size by max-pooling.
+     *
+     * The region of interest is represented by its upper-left corner coordinate
+     * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+     * A spatial scaling factor is applied to map into feature map coordinates.
+     * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+     *
+     * Rounding is applied in this operation to ensure integer boundary for
+     * regions of interest and pooling bins.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Inputs:
+     * * 0: A 4-D tensor, specifying the feature map.
+     * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+     *      the regions of interest, each line with format [x1, y1, x2, y2].
+     *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+     *      with zeroPoint of 0 and scale of 0.125.
+     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+     *      [num_rois], specifying the batch index of each box. Boxes with
+     *      the same batch index are grouped together.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the output
+     *      height of the output tensor.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the output
+     *      width of the output tensor.
+     * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+     *      from the height of original image to the height of feature map.
+     * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+     *      from the width of original image to the width of feature map.
+     * * 7: An {@link OperandType::BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0. The output
+     *      shape is [num_rois, out_height, out_width, depth].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    ROI_POOLING = @1.2::OperationType:ROI_POOLING,
+
+    /**
+     * Computes reciprocal of square root of x element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    RSQRT = @1.2::OperationType:RSQRT,
+
+    /**
+     * Using a tensor of booleans c and input tensors x and y, selects values
+     * elementwise from both input tensors:
+     *
+     *     output[i] = c[i] ? x[i] : y[i]
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a
+     *      mask that chooses, based on the value at each element, whether the
+     *      corresponding element in the output should be taken from input1 (if
+     *      true) or input2 (if false).
+     * * 1: An input tensor of the same shape as input0.
+     * * 2: An input tensor of the same shape and type as input1.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+     *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
+     *
+     * Outputs:
+     * * 0: A tensor of the same type and shape as input1 and input2.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    SELECT = @1.2::OperationType:SELECT,
+
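An editor-provided sketch of the ternary selection above for flattened, same-shaped float tensors (`select` is a hypothetical helper; the mask is in TENSOR_BOOL8 form):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// out[i] = c[i] ? x[i] : y[i], evaluated elementwise.
std::vector<float> select(const std::vector<uint8_t>& c,
                          const std::vector<float>& x,
                          const std::vector<float>& y) {
    std::vector<float> out(c.size());
    for (size_t i = 0; i < c.size(); ++i) out[i] = c[i] ? x[i] : y[i];
    return out;
}
```
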
+    /**
+     * Computes sin of x element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    SIN = @1.2::OperationType:SIN,
+
+    /**
+     * Extracts a slice of specified size from the input tensor starting at a
+     * specified location.
+     *
+     * The starting location is specified as a 1-D tensor containing offsets
+     * for each dimension. The size is specified as a 1-D tensor containing
+     * either the size of the slice along the corresponding dimension or -1. In
+     * the latter case, all remaining elements in the dimension are included in
+     * the slice.
+     *
+     * The sum of the begin offset and the size of a slice must not exceed the
+     * size of the corresponding dimension.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor to take slice from, may be zero-sized.
+     * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+     *      the beginning indices of the slice in each dimension.
+     * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+     *      the size of the slice in each dimension.
+     *
+     * Outputs:
+     * * 0: An n-D tensor of the same type as the input containing the slice.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      its scale and zeroPoint must be the same as the input0 scale and zeroPoint.
+     */
+    SLICE = @1.2::OperationType:SLICE,
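The -1 convention for sizes can be resolved as sketched below (hypothetical helper):

    #include <cstdint>
    #include <vector>

    // Expands a SLICE size of -1 to "all remaining elements" per dimension.
    // The caller must ensure begin[d] + resolved[d] <= inputDims[d].
    std::vector<int32_t> resolveSliceSizes(const std::vector<int32_t>& inputDims,
                                           const std::vector<int32_t>& begin,
                                           const std::vector<int32_t>& size) {
        std::vector<int32_t> resolved(size.size());
        for (size_t d = 0; d < size.size(); ++d) {
            resolved[d] = (size[d] == -1) ? inputDims[d] - begin[d] : size[d];
        }
        return resolved;
    }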
+
+    /**
+     * Splits a tensor along a given axis into num_splits subtensors.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor to split.
+     * * 1: An {@link OperandType::INT32} scalar specifying the axis along
+     *      which to split.
+     * * 2: An {@link OperandType::INT32} scalar indicating the number of
+     *      splits along given axis. Must evenly divide axis size.
+     *
+     * Outputs:
+     * * 0 ~ (num_splits - 1): Resulting subtensors.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    SPLIT = @1.2::OperationType:SPLIT,
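The shape of each SPLIT output follows directly from the description; a sketch (hypothetical helper):

    #include <cstdint>
    #include <vector>

    // Each of the num_splits outputs keeps the input shape except along the
    // split axis, which shrinks by a factor of numSplits (numSplits must
    // divide the axis size evenly, per the spec).
    std::vector<int32_t> splitOutputShape(std::vector<int32_t> dims, int axis,
                                          int numSplits) {
        dims[axis] /= numSplits;  // e.g. [2, 6, 4], axis=1, numSplits=3 -> [2, 2, 4]
        return dims;
    }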
+
+    /**
+     * Computes square root of x element-wise.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: A tensor.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     */
+    SQRT = @1.2::OperationType:SQRT,
+
+    /**
+     * Constructs a tensor by tiling a given tensor.
+     *
+     * This operation creates a new tensor by replicating `input` `multiples`
+     * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
+     * elements, and the values of `input` are replicated `multiples[i]` times
+     * along the i-th dimension.
+     * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: input, an n-D tensor specifying the input.
+     * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}.
+     *      The length of multiples must be n.
+     *
+     * Outputs:
+     * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    TILE = @1.2::OperationType:TILE,
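The output shape rule for TILE, as a sketch (hypothetical helper):

    #include <cstdint>
    #include <vector>

    // out.dims(i) = input.dims(i) * multiples[i]; tiling [a b c d] by [2]
    // therefore yields a shape of [8], i.e. [a b c d a b c d].
    std::vector<int32_t> tileOutputShape(const std::vector<int32_t>& dims,
                                         const std::vector<int32_t>& multiples) {
        std::vector<int32_t> out(dims.size());
        for (size_t i = 0; i < dims.size(); ++i) {
            out[i] = dims[i] * multiples[i];
        }
        return out;
    }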
+
+    /**
+     * Finds values and indices of the k largest entries for the last dimension.
+     *
+     * Resulting values in each dimension are sorted in descending order. If
+     * two values are equal, the one with the larger index appears first.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_INT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: input, an n-D tensor specifying the input.
+     * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of
+     *      top elements to look for along the last dimension.
+     *
+     * Outputs:
+     * * 0: An n-D tensor of the same type as the input, containing the k
+     *      largest elements along each last dimensional slice.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
+     *      containing the indices of values within the last dimension of input.
+     */
+    TOPK_V2 = @1.2::OperationType:TOPK_V2,
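A reference sketch of TOPK_V2 over a single 1-D slice (hypothetical helper, not the HAL API); an n-D input would apply this to every last-dimension slice:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Returns the k largest values in descending order plus their indices;
    // ties keep the larger index first, matching the description above.
    std::pair<std::vector<float>, std::vector<int32_t>> topK(
            const std::vector<float>& in, int32_t k) {
        std::vector<int32_t> idx(in.size());
        for (size_t i = 0; i < idx.size(); ++i) idx[i] = static_cast<int32_t>(i);
        std::sort(idx.begin(), idx.end(), [&](int32_t a, int32_t b) {
            return in[a] != in[b] ? in[a] > in[b] : a > b;  // value desc, then index desc
        });
        idx.resize(k);
        std::vector<float> values(k);
        for (int32_t i = 0; i < k; ++i) values[i] = in[idx[i]];
        return {values, idx};
    }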
+
+    /**
+     * Performs the transpose of 2-D convolution operation.
+     *
+     * This operation is sometimes called "deconvolution" after Deconvolutional
+     * Networks, but is actually the transpose (gradient) of
+     * {@link OperationType::CONV_2D} rather than an actual deconvolution.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
+     *
+     * Supported tensor {@link OperandType} configurations:
+     * * 16 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+     *
+     * * 32 bit floating point:
+     * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+     *
+     * * Quantized:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+     * * * input.scale * filter.scale).
+     *
+     * * Quantized with symmetric per channel quantization for the filter:
+     * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0;
+     * * * the scale of each value is separate and equal to input.scale * filter.scales[channel]).
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_in], specifying the
+     *      filter. For tensor of type
+     *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+     *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
+     *      {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+     *      same type. For input tensor of type
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale. For filter tensor of
+     *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+     *      must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal
+     *      to bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+     *       NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_in], specifying the
+     *      filter. For tensor of type
+     *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+     *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
+     *      {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+     *      same type. For input tensor of type
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale. For filter tensor of
+     *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+     *      must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal
+     *      to bias_scale[i] = input_scale * filter_scale[i].
+     * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
+     *      tensor shape.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
+     * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+     *      NCHW data layout for input0 and output0. Set to false for NHWC.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth_out].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+     */
+    TRANSPOSE_CONV_2D = @1.2::OperationType:TRANSPOSE_CONV_2D,
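With explicit padding, the spatial output size follows the standard transposed-convolution relationship; with implicit padding the output shape is instead supplied directly as input 3. A sketch of the explicit case (hypothetical helper):

    #include <cstdint>

    // out = (in - 1) * stride + filter - padBefore - padAfter, applied
    // independently to the height and width dimensions.
    int32_t transposeConvOutputSize(int32_t inSize, int32_t filterSize,
                                    int32_t stride, int32_t padBefore,
                                    int32_t padAfter) {
        return (inSize - 1) * stride + filterSize - padBefore - padAfter;
    }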
+
+    /**
+     * A recurrent neural network specified by an LSTM cell.
+     *
+     * Performs (fully) dynamic unrolling of input.
+     *
+     * This Op unrolls the input along the time dimension, and implements the
+     * following operation for each element in the sequence
+     * s = 1...sequence_length:
+     *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
+     *
+     * Where LSTMOp is the LSTM op as in {@link OperationType::LSTM},
+     * the "projection" is an optional projection layer from state and output
+     * and the “activation” is the function passed as the
+     * “fused_activation_function” argument (if not “NONE”).
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: 3, either time-major or batch-major.
+     *
+     * All input and output tensors must be of the same type.
+     *
+     * Inputs:
+     * * 0: The input (\f$x_t\f$).
+     *      A 3-D tensor of shape:
+     *        If time-major: [max_time, batch_size, input_size]
+     *        If batch-major: [batch_size, max_time, input_size]
+     *      where “max_time” is the number of timesteps (sequence length),
+     *      “batch_size” corresponds to the batching dimension, and
+     *      “input_size” is the size of the input.
+     * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+     *      A 2-D tensor of shape [num_units, input_size], where “num_units”
+     *      corresponds to the number of cell units.
+     * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+     *      A 2-D tensor of shape [num_units, input_size].
+     * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+     *      A 2-D tensor of shape [num_units, input_size].
+     * * 4: The input-to-output weights (\f$W_{xo}\f$).
+     *      A 2-D tensor of shape [num_units, input_size].
+     * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+     *      A 2-D tensor of shape [num_units, output_size], where “output_size”
+     *      corresponds to either the number of cell units (i.e., “num_units”),
+     *      or the second dimension of the “projection_weights”, if defined.
+     * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+     *      A 2-D tensor of shape [num_units, output_size].
+     * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+     *      A 2-D tensor of shape [num_units, output_size].
+     * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+     *      A 2-D tensor of shape [num_units, output_size].
+     * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 10: The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 11: The cell-to-output weights (\f$W_{co}\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 12: The input gate bias (\f$b_i\f$). Optional.
+     *      A 1-D tensor of shape [num_units].
+     * * 13: The forget gate bias (\f$b_f\f$).
+     *      A 1-D tensor of shape [num_units].
+     * * 14: The cell bias (\f$b_c\f$).
+     *      A 1-D tensor of shape [num_units].
+     * * 15: The output gate bias (\f$b_o\f$).
+     *      A 1-D tensor of shape [num_units].
+     * * 16: The projection weights (\f$W_{proj}\f$). Optional.
+     *      A 2-D tensor of shape [output_size, num_units].
+     * * 17: The projection bias (\f$b_{proj}\f$). Optional.
+     *      A 1-D tensor of shape [output_size].
+     * * 18: The output state (in) (\f$h_{t-1}\f$).
+     *      A 2-D tensor of shape [batch_size, output_size].
+     * * 19: The cell state (in) (\f$C_{t-1}\f$).
+     *      A 2-D tensor of shape [batch_size, num_units].
+     * * 20: The activation function (\f$g\f$).
+     *      A value indicating the activation function:
+     *      <ul>
+     *      <li>0: None;
+     *      <li>1: Relu;
+     *      <li>3: Relu6;
+     *      <li>4: Tanh;
+     *      <li>6: Sigmoid.
+     *      </ul>
+     * * 21: The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+     *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+     *      then clipping is disabled.
+     * * 22: The clipping threshold (\f$t_{proj}\f$) for the output from the
+     *      projection layer, such that values are bound within
+     *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+     * * 23: Time-major if true, batch-major if false.
+     * * 24: The input layer normalization weights. Optional.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at input gate.
+     * * 25: The forget layer normalization weights. Optional.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at forget gate.
+     * * 26: The cell layer normalization weights. Optional.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at cell gate.
+     * * 27: The output layer normalization weights. Optional.
+     *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+     *      to activation at output gate.
+     *
+     * Outputs:
+     * * 0: The output (\f$o_t\f$).
+     *      A 3-D tensor of shape:
+     *        If time-major: [max_time, batch_size, output_size]
+     *        If batch-major: [batch_size, max_time, output_size]
+     */
+    UNIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_LSTM,
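The time-major flag (input 23) only reorders the first two output dimensions; a sketch (hypothetical helper):

    #include <cstdint>
    #include <vector>

    // Output shape of UNIDIRECTIONAL_SEQUENCE_LSTM per the description above.
    std::vector<int32_t> lstmOutputShape(int32_t maxTime, int32_t batchSize,
                                         int32_t outputSize, bool timeMajor) {
        return timeMajor ? std::vector<int32_t>{maxTime, batchSize, outputSize}
                         : std::vector<int32_t>{batchSize, maxTime, outputSize};
    }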
+
+    /**
+     * A recurrent neural network layer that applies a basic RNN cell to a
+     * sequence of inputs.
+     *
+     * This layer unrolls the input along the sequence dimension, and implements
+     * the following operation
+     * for each element in the sequence s = 1...sequence_length:
+     *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
+     *   recurrent_weights’ + bias)
+     *
+     * Where:
+     * * “input_weights” is a weight matrix that multiplies the inputs;
+     * * “recurrent_weights” is a weight matrix that multiplies the current
+     *    “state” which itself is the output from the previous time step
+     *    computation;
+     * * “bias” is a bias vector (added to each output vector in the batch);
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * The input tensors must all be the same type.
+     *
+     * Inputs:
+     * * 0: input.
+     *      A 3-D tensor. The shape is defined by input 6 (timeMajor). If
+     *      it is set to 1, then the input has a shape [maxTime, batchSize,
+     *      inputSize], otherwise the input has a shape [batchSize, maxTime,
+     *      inputSize].
+     * * 1: weights.
+     *      A 2-D tensor of shape [numUnits, inputSize].
+     * * 2: recurrent_weights.
+     *      A 2-D tensor of shape [numUnits, numUnits].
+     * * 3: bias.
+     *      A 1-D tensor of shape [numUnits].
+     * * 4: hidden state
+     *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
+     *      state input for the first time step of the computation.
+     * * 5: fusedActivationFunction.
+     *      A {@link FusedActivationFunc} value indicating the activation function. If
+     *      “NONE” is specified then it results in a linear activation.
+     * * 6: timeMajor
+     *      An {@link OperandType::INT32} scalar specifying the shape format
+     *      of input and output tensors. Must be set to either 0 or 1.
+     * Outputs:
+     * * 0: output.
+     *      A 3-D tensor. The shape is defined by input 6 (timeMajor). If
+     *      it is set to 1, then the output has a shape [maxTime, batchSize,
+     *      numUnits], otherwise the output has a shape [batchSize, maxTime,
+     *      numUnits].
+     */
+    UNIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_RNN,
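One step of the recurrence above, for a single batch element (plain C++ sketch with hypothetical names; tanhf, for example, could serve as the activation):

    #include <vector>

    // state = act(input * W' + state * R' + bias), with
    // input[inputSize], W[numUnits][inputSize], R[numUnits][numUnits].
    std::vector<float> rnnStep(const std::vector<float>& input,
                               const std::vector<std::vector<float>>& W,
                               const std::vector<std::vector<float>>& R,
                               const std::vector<float>& bias,
                               const std::vector<float>& state,
                               float (*act)(float)) {
        std::vector<float> next(bias);  // start from the bias vector
        for (size_t u = 0; u < next.size(); ++u) {
            for (size_t i = 0; i < input.size(); ++i) next[u] += W[u][i] * input[i];
            for (size_t j = 0; j < state.size(); ++j) next[u] += R[u][j] * state[j];
            next[u] = act(next[u]);
        }
        return next;
    }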
+
+    /**
+     * Resizes images to given size using the nearest neighbor interpolation.
+     *
+     * Resized images will be distorted if their output aspect ratio is not
+     * the same as the input aspect ratio. The corner pixels of the output may
+     * not be the same as the corner pixels of the input.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout could
+     * be NCHW, the data storage order of: [batch, channels, height, width].
+     *
+     * Both resizing by shape and resizing by scale are supported.
+     *
+     * Inputs (resizing by shape):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input. Zero batches is supported for this tensor.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the output
+     *      width of the output tensor.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the output
+     *      height of the output tensor.
+     * * 3: An {@link OperandType::BOOL} scalar, defaulting to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *
+     * Inputs (resizing by scale):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input. Zero batches is supported for this tensor.
+     * * 1: A scalar, specifying width_scale, the scaling factor of the width
+     *      dimension from the input tensor to the output tensor. The output
+     *      width is calculated as new_width = floor(width * width_scale).
+     *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
+     *      of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} otherwise.
+     * * 2: A scalar, specifying height_scale, the scaling factor of the height
+     *      dimension from the input tensor to the output tensor. The output
+     *      height is calculated as new_height = floor(height * height_scale).
+     *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
+     *      of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} otherwise.
+     * * 3: An {@link OperandType::BOOL} scalar, defaulting to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, new_height, new_width, depth].
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,
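Resizing by scale derives the output spatial size as sketched here (hypothetical helper); resizing by shape takes the same sizes directly as inputs 1 and 2:

    #include <cmath>
    #include <cstdint>

    // new_size = floor(size * scale), applied to height and width separately.
    int32_t scaledOutputSize(int32_t inSize, float scale) {
        return static_cast<int32_t>(std::floor(inSize * scale));
    }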
+
+    /**
+     * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+     * OEM operation and data types.
+     *
+     * This operation is OEM specific. It should only be used for OEM
+     * applications.
+     */
+    OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+    /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+     * OperationTypeRange::FUNDAMENTAL_MAX.
+     */
+    /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+     * OperationTypeRange::OEM_MAX.
+     */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+    BASE_MIN        = 0,
+    FUNDAMENTAL_MIN = 0,
+    FUNDAMENTAL_MAX = 94,
+    OEM_MIN         = 10000,
+    OEM_MAX         = 10000,
+    BASE_MAX        = 0xFFFF,
+};
+
 
 /**
  * The capabilities of a driver.
@@ -109,6 +4606,32 @@
 };
 
 /**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+    /**
+     * The operation type.
+     *
+     * Besides the values listed in {@link OperationType}, any value above
+     * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+     * as an extension type according to {@link Model::extensionNameToPrefix}.
+     */
+    OperationType type;
+
+    /**
+     * Describes the table that contains the indexes of the inputs of the
+     * operation. The offset is the index in the operandIndexes table.
+     */
+    vec<uint32_t> inputs;
+
+    /**
+     * Describes the table that contains the indexes of the outputs of the
+     * operation. The offset is the index in the operandIndexes table.
+     */
+    vec<uint32_t> outputs;
+};
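The extension interpretation referenced in the type field can be sketched as follows, assuming the NN HAL's documented encoding of extension types (extension prefix in the high 16 bits, type within the extension in the low 16 bits; see Model::extensionNameToPrefix):

    #include <cstdint>

    struct DecodedType {
        uint16_t prefix;               // matches an extensionNameToPrefix entry
        uint16_t typeWithinExtension;  // type ID inside that extension
    };

    // Values at or below OperationTypeRange::BASE_MAX (0xFFFF) decode to a
    // zero prefix, i.e. a regular (non-extension) operation type.
    DecodedType decodeOperationType(uint32_t rawType) {
        return {static_cast<uint16_t>(rawType >> 16),
                static_cast<uint16_t>(rawType & 0xFFFF)};
    }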
+
+/**
  * Describes one operand of the model's graph.
  */
 struct Operand {
@@ -233,28 +4756,6 @@
 };
 
 /**
- * Describes one operation of the model's graph.
- */
-struct Operation {
-    /**
-     * The operation type.
-     */
-    OperationType type;
-
-    /**
-     * Describes the table that contains the indexes of the inputs of the
-     * operation. The offset is the index in the operandIndexes table.
-     */
-    vec<uint32_t> inputs;
-
-    /**
-     * Describes the table that contains the indexes of the outputs of the
-     * operation. The offset is the index in the operandIndexes table.
-     */
-    vec<uint32_t> outputs;
-};
-
-/**
  * A Neural Network Model.
  *
  * This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index d41cfd2..e06f5d6 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -44,6 +44,47 @@
     BASE_MAX        = 0xFFFF,
 };
 
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+%insert Operation_1.0
+
+%insert Operation_1.1
+
+%insert Operation_1.2
+
+    /**
+     * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+     * OEM operation and data types.
+     *
+     * This operation is OEM specific. It should only be used for OEM
+     * applications.
+     */
+    OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+    /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+     * OperationTypeRange::FUNDAMENTAL_MAX.
+     */
+    /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+     * OperationTypeRange::OEM_MAX.
+     */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+    BASE_MIN        = 0,
+    FUNDAMENTAL_MIN = 0,
+%insert Operation_1.3_MAX
+    OEM_MIN         = 10000,
+    OEM_MAX         = 10000,
+    BASE_MAX        = 0xFFFF,
+};
+
 
 /**
  * The capabilities of a driver.
@@ -80,6 +121,32 @@
 };
 
 /**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+    /**
+     * The operation type.
+     *
+     * Besides the values listed in {@link OperationType}, any value above
+     * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+     * as an extension type according to {@link Model::extensionNameToPrefix}.
+     */
+    OperationType type;
+
+    /**
+     * Describes the table that contains the indexes of the inputs of the
+     * operation. The offset is the index in the operandIndexes table.
+     */
+    vec<uint32_t> inputs;
+
+    /**
+     * Describes the table that contains the indexes of the outputs of the
+     * operation. The offset is the index in the operandIndexes table.
+     */
+    vec<uint32_t> outputs;
+};
+
+/**
  * Describes one operand of the model's graph.
  */
 struct Operand {
@@ -204,28 +271,6 @@
 };
 
 /**
- * Describes one operation of the model's graph.
- */
-struct Operation {
-    /**
-     * The operation type.
-     */
-    OperationType type;
-
-    /**
-     * Describes the table that contains the indexes of the inputs of the
-     * operation. The offset is the index in the operandIndexes table.
-     */
-    vec<uint32_t> inputs;
-
-    /**
-     * Describes the table that contains the indexes of the outputs of the
-     * operation. The offset is the index in the operandIndexes table.
-     */
-    vec<uint32_t> outputs;
-};
-
-/**
  * A Neural Network Model.
  *
  * This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index d8a7534..60992d5 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -456,8 +456,7 @@
     }
 
     // Execute and verify results.
-    EvaluatePreparedModel(preparedModel, testModel,
-                          /*testDynamicOutputShape=*/false);
+    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
 }
 
 TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
@@ -519,8 +518,7 @@
     }
 
     // Execute and verify results.
-    EvaluatePreparedModel(preparedModel, testModel,
-                          /*testDynamicOutputShape=*/false);
+    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
 }
 
 TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
@@ -541,8 +539,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -566,8 +563,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -590,8 +586,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -615,8 +610,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -727,8 +721,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -752,8 +745,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -776,8 +768,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -801,8 +792,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -914,8 +904,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -937,8 +926,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -1082,8 +1070,7 @@
                 ASSERT_EQ(preparedModel, nullptr);
             } else {
                 ASSERT_NE(preparedModel, nullptr);
-                EvaluatePreparedModel(preparedModel, testModelAdd,
-                                      /*testDynamicOutputShape=*/false);
+                EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
             }
         }
     }
@@ -1144,8 +1131,7 @@
                 ASSERT_EQ(preparedModel, nullptr);
             } else {
                 ASSERT_NE(preparedModel, nullptr);
-                EvaluatePreparedModel(preparedModel, testModelAdd,
-                                      /*testDynamicOutputShape=*/false);
+                EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
             }
         }
     }
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 325d641..3e947f5 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -63,15 +63,41 @@
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
 using V1_2::MeasureTiming;
-using V1_2::OperationType;
 using V1_2::OutputShape;
 using V1_2::SymmPerChannelQuantParams;
 using V1_2::Timing;
 using V1_2::implementation::ExecutionCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
+namespace {
+
+enum class Executor { ASYNC, SYNC, BURST };
+
 enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
 
+struct TestConfig {
+    Executor executor;
+    MeasureTiming measureTiming;
+    OutputType outputType;
+    // `reportSkipping` indicates whether a test should print an info message
+    // when it is skipped. The field is true by default and is set to false in
+    // quantization coupling tests, which report the skip themselves.
+    bool reportSkipping;
+    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          reportSkipping(true) {}
+    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
+               bool reportSkipping)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          reportSkipping(reportSkipping) {}
+};
+
+}  // namespace
+
 Model createModel(const TestModel& testModel) {
     // Model operands.
     hidl_vec<Operand> operands(testModel.operands.size());
@@ -206,31 +232,34 @@
     return android::nn::ExecutionBurstController::create(preparedModel,
                                                          std::chrono::microseconds{0});
 }
-enum class Executor { ASYNC, SYNC, BURST };
 
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                           Executor executor, MeasureTiming measure, OutputType outputType) {
+                           const TestConfig& testConfig, bool* skipped = nullptr) {
+    if (skipped != nullptr) {
+        *skipped = false;
+    }
     // If output0 does not have size larger than one byte, we can not test with insufficient buffer.
-    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+    if (testConfig.outputType == OutputType::INSUFFICIENT &&
+        !isOutputSizeGreaterThanOne(testModel, 0)) {
         return;
     }
 
     Request request = createRequest(testModel);
-    if (outputType == OutputType::INSUFFICIENT) {
+    if (testConfig.outputType == OutputType::INSUFFICIENT) {
         makeOutputInsufficientSize(/*outputIndex=*/0, &request);
     }
 
     ErrorStatus executionStatus;
     hidl_vec<OutputShape> outputShapes;
     Timing timing;
-    switch (executor) {
+    switch (testConfig.executor) {
         case Executor::ASYNC: {
             SCOPED_TRACE("asynchronous");
 
             // launch execution
             sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-            Return<ErrorStatus> executionLaunchStatus =
-                    ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+                    preparedModel, request, testConfig.measureTiming, executionCallback);
             ASSERT_TRUE(executionLaunchStatus.isOk());
             EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
@@ -246,8 +275,8 @@
             SCOPED_TRACE("synchronous");
 
             // execute
-            Return<ErrorStatus> executionReturnStatus =
-                    ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
             ASSERT_TRUE(executionReturnStatus.isOk());
             executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
 
@@ -270,15 +299,21 @@
             // execute burst
             int n;
             std::tie(n, outputShapes, timing, std::ignore) =
-                    controller->compute(request, measure, keys);
+                    controller->compute(request, testConfig.measureTiming, keys);
             executionStatus = nn::convertResultCodeToErrorStatus(n);
 
             break;
         }
     }
 
-    if (outputType != OutputType::FULLY_SPECIFIED &&
+    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
         executionStatus == ErrorStatus::GENERAL_FAILURE) {
+        if (skipped != nullptr) {
+            *skipped = true;
+        }
+        if (!testConfig.reportSkipping) {
+            return;
+        }
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                      "execute model that it does not support.";
         std::cout << "[          ]   Early termination of test because vendor service cannot "
@@ -286,7 +321,7 @@
                   << std::endl;
         GTEST_SKIP();
     }
-    if (measure == MeasureTiming::NO) {
+    if (testConfig.measureTiming == MeasureTiming::NO) {
         EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
         EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
     } else {
@@ -295,7 +330,7 @@
         }
     }
 
-    switch (outputType) {
+    switch (testConfig.outputType) {
         case OutputType::FULLY_SPECIFIED:
             // If the model output operands are fully specified, outputShapes must be either
             // either empty, or have the same number of elements as the number of outputs.
@@ -332,59 +367,117 @@
 }
 
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                           bool testDynamicOutputShape) {
-    if (testDynamicOutputShape) {
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
-                              OutputType::UNSPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
-                              OutputType::INSUFFICIENT);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
-                              OutputType::INSUFFICIENT);
-    } else {
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
-                              OutputType::FULLY_SPECIFIED);
-        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
-                              OutputType::FULLY_SPECIFIED);
+                           TestKind testKind) {
+    std::initializer_list<OutputType> outputTypesList;
+    std::initializer_list<MeasureTiming> measureTimingList;
+    std::initializer_list<Executor> executorList;
+
+    switch (testKind) {
+        case TestKind::GENERAL: {
+            outputTypesList = {OutputType::FULLY_SPECIFIED};
+            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+        } break;
+        case TestKind::DYNAMIC_SHAPE: {
+            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+        } break;
+        case TestKind::QUANTIZATION_COUPLING: {
+            LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
+            return;
+        } break;
+    }
+
+    for (const OutputType outputType : outputTypesList) {
+        for (const MeasureTiming measureTiming : measureTimingList) {
+            for (const Executor executor : executorList) {
+                const TestConfig testConfig(executor, measureTiming, outputType);
+                EvaluatePreparedModel(preparedModel, testModel, testConfig);
+            }
+        }
     }
 }
 
-void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+void EvaluatePreparedCoupledModels(const sp<IPreparedModel>& preparedModel,
+                                   const TestModel& testModel,
+                                   const sp<IPreparedModel>& preparedCoupledModel,
+                                   const TestModel& coupledModel) {
+    std::initializer_list<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
+    std::initializer_list<MeasureTiming> measureTimingList = {MeasureTiming::NO,
+                                                              MeasureTiming::YES};
+    std::initializer_list<Executor> executorList = {Executor::ASYNC, Executor::SYNC,
+                                                    Executor::BURST};
+
+    for (const OutputType outputType : outputTypesList) {
+        for (const MeasureTiming measureTiming : measureTimingList) {
+            for (const Executor executor : executorList) {
+                const TestConfig testConfig(executor, measureTiming, outputType,
+                                            /*reportSkipping=*/false);
+                bool baseSkipped = false;
+                EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
+                bool coupledSkipped = false;
+                EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig,
+                                      &coupledSkipped);
+                ASSERT_EQ(baseSkipped, coupledSkipped);
+                if (baseSkipped) {
+                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                                 "execute model that it does not support.";
+                    std::cout << "[          ]   Early termination of test because vendor service "
+                                 "cannot "
+                                 "execute model that it does not support."
+                              << std::endl;
+                    GTEST_SKIP();
+                }
+            }
+        }
+    }
+}
+
+void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
     Model model = createModel(testModel);
-    if (testDynamicOutputShape) {
+    if (testKind == TestKind::DYNAMIC_SHAPE) {
         makeOutputDimensionsUnspecified(&model);
     }
 
     sp<IPreparedModel> preparedModel;
-    createPreparedModel(device, model, &preparedModel);
-    if (preparedModel == nullptr) return;
-
-    EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
+    switch (testKind) {
+        case TestKind::GENERAL: {
+            createPreparedModel(device, model, &preparedModel);
+            if (preparedModel == nullptr) return;
+            EvaluatePreparedModel(preparedModel, testModel, TestKind::GENERAL);
+        } break;
+        case TestKind::DYNAMIC_SHAPE: {
+            createPreparedModel(device, model, &preparedModel);
+            if (preparedModel == nullptr) return;
+            EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
+        } break;
+        case TestKind::QUANTIZATION_COUPLING: {
+            ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
+            createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
+            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
+            sp<IPreparedModel> preparedCoupledModel;
+            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
+                                /*reportSkipping*/ false);
+            // If we couldn't prepare a model with unsigned quantization, we must
+            // fail to prepare a model with signed quantization as well.
+            if (preparedModel == nullptr) {
+                ASSERT_EQ(preparedCoupledModel, nullptr);
+                // If we failed to prepare both of the models, we can safely skip
+                // the test.
+                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                             "prepare model that it does not support.";
+                std::cout
+                        << "[          ]   Early termination of test because vendor service cannot "
+                           "prepare model that it does not support."
+                        << std::endl;
+                GTEST_SKIP();
+            }
+            ASSERT_NE(preparedCoupledModel, nullptr);
+            EvaluatePreparedCoupledModels(preparedModel, testModel, preparedCoupledModel,
+                                          signedQuantizedModel);
+        } break;
+    }
 }
 
 void GeneratedTestBase::SetUp() {
@@ -407,12 +500,19 @@
 // Tag for the dynamic output shape tests
 class DynamicOutputShapeTest : public GeneratedTest {};
 
+// Tag for the quantization coupling tests
+class DISABLED_QuantizationCouplingTest : public GeneratedTest {};
+
 TEST_P(GeneratedTest, Test) {
-    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
+    Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
 }
 
 TEST_P(DynamicOutputShapeTest, Test) {
-    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
+    Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
+}
+
+TEST_P(DISABLED_QuantizationCouplingTest, Test) {
+    Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
 }
 
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
@@ -421,4 +521,8 @@
 INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
+INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
+    return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
+});
+
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 45cff5b..ad6323f 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -57,8 +57,19 @@
 
 void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
 
+enum class TestKind {
+    // Runs a test model and compares the results to golden data.
+    GENERAL,
+    // Same as GENERAL but sets the dimensions of the output tensors to zero.
+    DYNAMIC_SHAPE,
+    // Tests whether a quantized model with TENSOR_QUANT8_ASYMM produces the
+    // same result (OK/SKIPPED/FAILED) as the same model with all such tensors
+    // converted to TENSOR_QUANT8_ASYMM_SIGNED.
+    QUANTIZATION_COUPLING
+};
+
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
-                           const test_helper::TestModel& testModel, bool testDynamicOutputShape);
+                           const test_helper::TestModel& testModel, TestKind testKind);
 
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
 
diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
index 7361078..a7569e6 100644
--- a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
+++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
@@ -25,8 +25,6 @@
 #define CHECK_TEST_ENUM(EnumType, enumValue) \
     static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
 
-using V1_2::OperationType;
-
 CHECK_TEST_ENUM(OperandType, FLOAT32);
 CHECK_TEST_ENUM(OperandType, INT32);
 CHECK_TEST_ENUM(OperandType, UINT32);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 1ff02dc..46bbd3f 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -27,7 +27,6 @@
 using V1_0::ErrorStatus;
 using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
-using V1_2::OperationType;
 using V1_2::OperationTypeRange;
 using V1_2::SymmPerChannelQuantParams;
 using HidlToken =
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 625913d..92d8fa7 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -37,7 +37,7 @@
 
 // internal helper function
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                         sp<IPreparedModel>* preparedModel) {
+                         sp<IPreparedModel>* preparedModel, bool reportSkipping) {
     ASSERT_NE(nullptr, preparedModel);
     *preparedModel = nullptr;
 
@@ -74,6 +74,9 @@
     // can continue.
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel->get());
+        if (!reportSkipping) {
+            return;
+        }
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
                      "model that it does not support.";
         std::cout << "[          ]   Early termination of test because vendor service cannot "
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index 8cb42d4..4e51052 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -47,7 +47,7 @@
 // Create an IPreparedModel object. If the model cannot be prepared,
 // "preparedModel" will be nullptr instead.
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                         sp<IPreparedModel>* preparedModel);
+                         sp<IPreparedModel>* preparedModel, bool reportSkipping = true);
 
 // Utility function to get PreparedModel from callback and downcast to V1_2.
 sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
diff --git a/radio/1.5/Android.bp b/radio/1.5/Android.bp
index de9ec6e..06a2a6e 100644
--- a/radio/1.5/Android.bp
+++ b/radio/1.5/Android.bp
@@ -19,6 +19,7 @@
         "android.hardware.radio@1.3",
         "android.hardware.radio@1.4",
         "android.hidl.base@1.0",
+        "android.hidl.safe_union@1.0",
     ],
     gen_java: true,
 }
diff --git a/radio/config/1.3/Android.bp b/radio/config/1.3/Android.bp
new file mode 100644
index 0000000..88de666
--- /dev/null
+++ b/radio/config/1.3/Android.bp
@@ -0,0 +1,23 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.radio.config@1.3",
+    root: "android.hardware",
+    vndk: {
+        enabled: true,
+    },
+    srcs: [
+        "types.hal",
+        "IRadioConfig.hal",
+        "IRadioConfigIndication.hal",
+        "IRadioConfigResponse.hal",
+    ],
+    interfaces: [
+        "android.hardware.radio.config@1.0",
+        "android.hardware.radio.config@1.1",
+        "android.hardware.radio.config@1.2",
+        "android.hardware.radio@1.0",
+        "android.hidl.base@1.0",
+    ],
+    gen_java: true,
+}
diff --git a/radio/config/1.3/IRadioConfig.hal b/radio/config/1.3/IRadioConfig.hal
new file mode 100644
index 0000000..a0ce6e0
--- /dev/null
+++ b/radio/config/1.3/IRadioConfig.hal
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
+
+import @1.1::IRadioConfig;
+
+/**
+ * This interface is used by telephony and telecom to talk to the cellular radio for the purpose
+ * of radio configuration; it is not associated with any specific modem or slot.
+ * All functions have at least one parameter:
+ * serial: the serial number of the request. Serial numbers must only be memorized for the
+ * duration of a method call. If clients provide colliding serials (including passing the same
+ * serial to different methods), multiple responses (one for each method call) must still be served.
+ */
+interface IRadioConfig extends @1.1::IRadioConfig {
+
+};
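
A minimal sketch of the serial contract described above (client side only; nextSerial and the response check are hypothetical):

    // Each request carries a caller-chosen serial; the HAL echoes it back in
    // the matching *Response method so the client can pair request and reply.
    int32_t serial = nextSerial++;
    radioConfig->getPhoneCapability(serial);
    // Later, in IRadioConfigResponse::getPhoneCapabilityResponse(info, cap):
    //     if (info.serial == serial) { /* reply belongs to this request */ }
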
diff --git a/radio/config/1.3/IRadioConfigIndication.hal b/radio/config/1.3/IRadioConfigIndication.hal
new file mode 100644
index 0000000..9ef496c
--- /dev/null
+++ b/radio/config/1.3/IRadioConfigIndication.hal
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
+
+import @1.2::IRadioConfigIndication;
+
+/**
+ * Interface declaring unsolicited radio config indications.
+ */
+interface IRadioConfigIndication extends @1.2::IRadioConfigIndication {
+
+};
diff --git a/radio/config/1.3/IRadioConfigResponse.hal b/radio/config/1.3/IRadioConfigResponse.hal
new file mode 100644
index 0000000..9c4c971
--- /dev/null
+++ b/radio/config/1.3/IRadioConfigResponse.hal
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
+
+import @1.2::IRadioConfigResponse;
+
+/**
+ * Interface declaring response functions to solicited radio config requests.
+ */
+interface IRadioConfigResponse extends @1.2::IRadioConfigResponse {
+
+};
diff --git a/radio/config/1.3/default/Android.bp b/radio/config/1.3/default/Android.bp
new file mode 100644
index 0000000..163c5c5
--- /dev/null
+++ b/radio/config/1.3/default/Android.bp
@@ -0,0 +1,28 @@
+cc_binary {
+    name: "android.hardware.radio.config@1.3-service",
+    init_rc: ["android.hardware.radio.config@1.3-service.rc"],
+    relative_install_path: "hw",
+    vintf_fragments: ["radio-config-default.xml"],
+    vendor: true,
+    srcs: [
+        "RadioConfig.cpp",
+        "RadioConfigIndication.cpp",
+        "RadioConfigResponse.cpp",
+        "service.cpp",
+    ],
+    shared_libs: [
+        "libhidlbase",
+        "liblog",
+        "libutils",
+        "android.hardware.radio.config@1.0",
+        "android.hardware.radio.config@1.1",
+        "android.hardware.radio.config@1.2",
+        "android.hardware.radio.config@1.3",
+        "android.hardware.radio@1.0",
+        "android.hardware.radio@1.1",
+        "android.hardware.radio@1.2",
+        "android.hardware.radio@1.3",
+        "android.hardware.radio@1.4",
+        "android.hardware.radio@1.5",
+    ],
+}
diff --git a/radio/config/1.3/default/RadioConfig.cpp b/radio/config/1.3/default/RadioConfig.cpp
new file mode 100644
index 0000000..c28119c
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfig.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RadioConfig.h"
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config;
+
+// Methods from ::android::hardware::radio::config::V1_0::IRadioConfig follow.
+Return<void> RadioConfig::setResponseFunctions(
+        const sp<V1_0::IRadioConfigResponse>& radioConfigResponse,
+        const sp<V1_0::IRadioConfigIndication>& radioConfigIndication) {
+    mRadioConfigResponse = radioConfigResponse;
+    mRadioConfigIndication = radioConfigIndication;
+
+    mRadioConfigResponseV1_3 =
+            V1_3::IRadioConfigResponse::castFrom(mRadioConfigResponse).withDefault(nullptr);
+    mRadioConfigIndicationV1_3 =
+            V1_3::IRadioConfigIndication::castFrom(mRadioConfigIndication).withDefault(nullptr);
+    if (mRadioConfigResponseV1_3 == nullptr || mRadioConfigIndicationV1_3 == nullptr) {
+        mRadioConfigResponseV1_3 = nullptr;
+        mRadioConfigIndicationV1_3 = nullptr;
+    }
+
+    mRadioConfigResponseV1_2 =
+            V1_2::IRadioConfigResponse::castFrom(mRadioConfigResponse).withDefault(nullptr);
+    mRadioConfigIndicationV1_2 =
+            V1_2::IRadioConfigIndication::castFrom(mRadioConfigIndication).withDefault(nullptr);
+    if (mRadioConfigResponseV1_2 == nullptr || mRadioConfigIndicationV1_2 == nullptr) {
+        mRadioConfigResponseV1_2 = nullptr;
+        mRadioConfigIndicationV1_2 = nullptr;
+    }
+
+    mRadioConfigResponseV1_1 =
+            V1_1::IRadioConfigResponse::castFrom(mRadioConfigResponse).withDefault(nullptr);
+    mRadioConfigIndicationV1_1 =
+            V1_1::IRadioConfigIndication::castFrom(mRadioConfigIndication).withDefault(nullptr);
+    if (mRadioConfigResponseV1_1 == nullptr || mRadioConfigIndicationV1_1 == nullptr) {
+        mRadioConfigResponseV1_1 = nullptr;
+        mRadioConfigIndicationV1_1 = nullptr;
+    }
+
+    return Void();
+}
+
+Return<void> RadioConfig::getSimSlotsStatus(int32_t /* serial */) {
+    hidl_vec<V1_0::SimSlotStatus> slotStatus;
+    RadioResponseInfo info;
+    mRadioConfigResponse->getSimSlotsStatusResponse(info, slotStatus);
+    return Void();
+}
+
+Return<void> RadioConfig::setSimSlotsMapping(int32_t /* serial */,
+                                             const hidl_vec<uint32_t>& /* slotMap */) {
+    RadioResponseInfo info;
+    mRadioConfigResponse->setSimSlotsMappingResponse(info);
+    return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_1::IRadioConfig follow.
+Return<void> RadioConfig::getPhoneCapability(int32_t /* serial */) {
+    V1_1::PhoneCapability phoneCapability;
+    RadioResponseInfo info;
+    mRadioConfigResponseV1_1->getPhoneCapabilityResponse(info, phoneCapability);
+    return Void();
+}
+
+Return<void> RadioConfig::setPreferredDataModem(int32_t /* serial */, uint8_t /* modemId */) {
+    RadioResponseInfo info;
+    mRadioConfigResponseV1_1->setPreferredDataModemResponse(info);
+    return Void();
+}
+
+Return<void> RadioConfig::setModemsConfig(int32_t /* serial */,
+                                          const V1_1::ModemsConfig& /* modemsConfig */) {
+    RadioResponseInfo info;
+    mRadioConfigResponseV1_1->setModemsConfigResponse(info);
+    return Void();
+}
+
+Return<void> RadioConfig::getModemsConfig(int32_t /* serial */) {
+    V1_1::ModemsConfig modemsConfig;
+    RadioResponseInfo info;
+    mRadioConfigResponseV1_1->getModemsConfigResponse(info, modemsConfig);
+    return Void();
+}
+
+}  // namespace implementation
+}  // namespace V1_3
+}  // namespace config
+}  // namespace radio
+}  // namespace hardware
+}  // namespace android
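
The cascade in setResponseFunctions is the usual HIDL version-negotiation idiom; a minimal sketch of one rung (castFrom and withDefault are the generated HIDL helpers; base stands for the @1.0 pointer received from the client):

    // castFrom succeeds only if the remote callback actually implements @1.1;
    // otherwise the service falls back to the lower-version pointers.
    sp<V1_1::IRadioConfigResponse> v1_1 =
            V1_1::IRadioConfigResponse::castFrom(base).withDefault(nullptr);
    if (v1_1 == nullptr) {
        // Peer is an older client; only @1.0 methods may be invoked on it.
    }
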
diff --git a/radio/config/1.3/default/RadioConfig.h b/radio/config/1.3/default/RadioConfig.h
new file mode 100644
index 0000000..00585e6
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfig.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIG_H
+#define ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIG_H
+
+#include <android/hardware/radio/config/1.3/IRadioConfig.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigIndication.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigResponse.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::config;
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct RadioConfig : public V1_3::IRadioConfig {
+    sp<V1_0::IRadioConfigResponse> mRadioConfigResponse;
+    sp<V1_0::IRadioConfigIndication> mRadioConfigIndication;
+    sp<V1_1::IRadioConfigResponse> mRadioConfigResponseV1_1;
+    sp<V1_1::IRadioConfigIndication> mRadioConfigIndicationV1_1;
+    sp<V1_2::IRadioConfigResponse> mRadioConfigResponseV1_2;
+    sp<V1_2::IRadioConfigIndication> mRadioConfigIndicationV1_2;
+    sp<V1_3::IRadioConfigResponse> mRadioConfigResponseV1_3;
+    sp<V1_3::IRadioConfigIndication> mRadioConfigIndicationV1_3;
+
+    // Methods from ::android::hardware::radio::config::V1_0::IRadioConfig follow.
+    Return<void> setResponseFunctions(
+            const sp<V1_0::IRadioConfigResponse>& radioConfigResponse,
+            const sp<V1_0::IRadioConfigIndication>& radioConfigIndication);
+    Return<void> getSimSlotsStatus(int32_t serial);
+    Return<void> setSimSlotsMapping(int32_t serial, const hidl_vec<uint32_t>& slotMap);
+
+    // Methods from ::android::hardware::radio::config::V1_1::IRadioConfig follow.
+    Return<void> getPhoneCapability(int32_t serial);
+    Return<void> setPreferredDataModem(int32_t serial, uint8_t modemId);
+    Return<void> setModemsConfig(int32_t serial, const V1_1::ModemsConfig& modemsConfig);
+    Return<void> getModemsConfig(int32_t serial);
+};
+
+}  // namespace implementation
+}  // namespace V1_3
+}  // namespace config
+}  // namespace radio
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIG_H
diff --git a/radio/config/1.3/default/RadioConfigIndication.cpp b/radio/config/1.3/default/RadioConfigIndication.cpp
new file mode 100644
index 0000000..eb77a48
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigIndication.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RadioConfigIndication.h"
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config::V1_0;
+using namespace ::android::hardware::radio::config::V1_2;
+
+// Methods from ::android::hardware::radio::config::V1_0::IRadioConfigIndication follow.
+Return<void> RadioConfigIndication::simSlotsStatusChanged(
+        RadioIndicationType /* type */, const hidl_vec<V1_0::SimSlotStatus>& /* slotStatus */) {
+    // TODO implement
+    return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_2::IRadioConfigIndication follow.
+Return<void> RadioConfigIndication::simSlotsStatusChanged_1_2(
+        RadioIndicationType /* type */, const hidl_vec<V1_2::SimSlotStatus>& /* slotStatus */) {
+    // TODO implement
+    return Void();
+}
+
+}  // namespace implementation
+}  // namespace V1_3
+}  // namespace config
+}  // namespace radio
+}  // namespace hardware
+}  // namespace android
diff --git a/radio/config/1.3/default/RadioConfigIndication.h b/radio/config/1.3/default/RadioConfigIndication.h
new file mode 100644
index 0000000..3697492
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigIndication.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGINDICATION_H
+#define ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGINDICATION_H
+
+#include <android/hardware/radio/config/1.3/IRadioConfigIndication.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config;
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct RadioConfigIndication : public IRadioConfigIndication {
+    // Methods from ::android::hardware::radio::config::V1_0::IRadioConfigIndication follow.
+    Return<void> simSlotsStatusChanged(RadioIndicationType type,
+                                       const hidl_vec<V1_0::SimSlotStatus>& slotStatus) override;
+
+    // Methods from ::android::hardware::radio::config::V1_2::IRadioConfigIndication follow.
+    Return<void> simSlotsStatusChanged_1_2(
+            RadioIndicationType type, const hidl_vec<V1_2::SimSlotStatus>& slotStatus) override;
+};
+
+}  // namespace implementation
+}  // namespace V1_3
+}  // namespace config
+}  // namespace radio
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGINDICATION_H
diff --git a/radio/config/1.3/default/RadioConfigResponse.cpp b/radio/config/1.3/default/RadioConfigResponse.cpp
new file mode 100644
index 0000000..48e81da
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigResponse.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RadioConfigResponse.h"
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config::V1_0;
+using namespace ::android::hardware::radio::config::V1_1;
+using namespace ::android::hardware::radio::config::V1_2;
+
+// Methods from ::android::hardware::radio::config::V1_0::IRadioConfigResponse follow.
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse(
+        const RadioResponseInfo& /* info */,
+        const hidl_vec<V1_0::SimSlotStatus>& /* slotStatus */) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> RadioConfigResponse::setSimSlotsMappingResponse(const RadioResponseInfo& /* info */) {
+    // TODO implement
+    return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_1::IRadioConfigResponse follow.
+Return<void> RadioConfigResponse::getPhoneCapabilityResponse(
+        const RadioResponseInfo& /* info */, const V1_1::PhoneCapability& /* phoneCapability */) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> RadioConfigResponse::setPreferredDataModemResponse(
+        const RadioResponseInfo& /* info */) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> RadioConfigResponse::setModemsConfigResponse(const RadioResponseInfo& /* info */) {
+    // TODO implement
+    return Void();
+}
+
+Return<void> RadioConfigResponse::getModemsConfigResponse(
+        const RadioResponseInfo& /* info */, const V1_1::ModemsConfig& /* modemsConfig */) {
+    // TODO implement
+    return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_2::IRadioConfigResponse follow.
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse_1_2(
+        const RadioResponseInfo& /* info */,
+        const hidl_vec<V1_2::SimSlotStatus>& /* slotStatus */) {
+    // TODO implement
+    return Void();
+}
+
+}  // namespace implementation
+}  // namespace V1_3
+}  // namespace config
+}  // namespace radio
+}  // namespace hardware
+}  // namespace android
diff --git a/radio/config/1.3/default/RadioConfigResponse.h b/radio/config/1.3/default/RadioConfigResponse.h
new file mode 100644
index 0000000..0f0033f
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigResponse.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGRESPONSE_H
+#define ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGRESPONSE_H
+
+#include <android/hardware/radio/config/1.3/IRadioConfigResponse.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct RadioConfigResponse : public IRadioConfigResponse {
+    // Methods from ::android::hardware::radio::config::V1_0::IRadioConfigResponse follow.
+    Return<void> getSimSlotsStatusResponse(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+            const hidl_vec<::android::hardware::radio::config::V1_0::SimSlotStatus>& slotStatus)
+            override;
+    Return<void> setSimSlotsMappingResponse(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info) override;
+
+    // Methods from ::android::hardware::radio::config::V1_1::IRadioConfigResponse follow.
+    Return<void> getPhoneCapabilityResponse(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+            const ::android::hardware::radio::config::V1_1::PhoneCapability& phoneCapability)
+            override;
+    Return<void> setPreferredDataModemResponse(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info) override;
+    Return<void> setModemsConfigResponse(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info) override;
+    Return<void> getModemsConfigResponse(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+            const ::android::hardware::radio::config::V1_1::ModemsConfig& modemsConfig) override;
+
+    // Methods from ::android::hardware::radio::config::V1_2::IRadioConfigResponse follow.
+    Return<void> getSimSlotsStatusResponse_1_2(
+            const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+            const hidl_vec<::android::hardware::radio::config::V1_2::SimSlotStatus>& slotStatus)
+            override;
+
+    // Methods from ::android::hidl::base::V1_0::IBase follow.
+};
+
+}  // namespace implementation
+}  // namespace V1_3
+}  // namespace config
+}  // namespace radio
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGRESPONSE_H
diff --git a/radio/config/1.3/default/android.hardware.radio.config@1.3-service.rc b/radio/config/1.3/default/android.hardware.radio.config@1.3-service.rc
new file mode 100644
index 0000000..6df9b52
--- /dev/null
+++ b/radio/config/1.3/default/android.hardware.radio.config@1.3-service.rc
@@ -0,0 +1,8 @@
+service vendor.radio-config-hal-1-3 /vendor/bin/hw/android.hardware.radio.config@1.3-service
+    interface android.hardware.radio.config@1.0::IRadioConfig default
+    interface android.hardware.radio.config@1.1::IRadioConfig default
+    interface android.hardware.radio.config@1.2::IRadioConfig default
+    interface android.hardware.radio.config@1.3::IRadioConfig default
+    class hal
+    user system
+    group system
diff --git a/radio/config/1.3/default/radio-config-default.xml b/radio/config/1.3/default/radio-config-default.xml
new file mode 100644
index 0000000..72f363e
--- /dev/null
+++ b/radio/config/1.3/default/radio-config-default.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+/*
+** Copyright 2019, The Android Open Source Project.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+-->
+<manifest version="1.0" type="device">
+    <hal format="hidl">
+        <name>android.hardware.radio.config</name>
+        <transport>hwbinder</transport>
+        <version>1.3</version>
+        <interface>
+            <name>IRadioConfig</name>
+            <instance>default</instance>
+        </interface>
+    </hal>
+</manifest>
diff --git a/radio/config/1.3/default/service.cpp b/radio/config/1.3/default/service.cpp
new file mode 100644
index 0000000..b1e6736
--- /dev/null
+++ b/radio/config/1.3/default/service.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.radio.config@1.3-service"
+
+#include <android/hardware/radio/config/1.3/IRadioConfig.h>
+#include <hidl/HidlTransportSupport.h>
+#include <log/log.h>
+
+#include "RadioConfig.h"
+
+using android::OK;
+using android::sp;
+using android::status_t;
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+using android::hardware::radio::config::V1_3::IRadioConfig;
+using android::hardware::radio::config::V1_3::implementation::RadioConfig;
+
+int main() {
+    configureRpcThreadpool(1, true);
+    sp<IRadioConfig> radioConfig = new RadioConfig;
+    const status_t status = radioConfig->registerAsService();
+    ALOGW_IF(status != OK, "Could not register IRadioConfig 1.3");
+    ALOGD("Default service is ready.");
+
+    joinRpcThreadpool();
+    return 1;  // joinRpcThreadpool should never return
+}
diff --git a/radio/config/1.3/types.hal b/radio/config/1.3/types.hal
new file mode 100644
index 0000000..866002a
--- /dev/null
+++ b/radio/config/1.3/types.hal
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
diff --git a/radio/config/1.3/vts/functional/Android.bp b/radio/config/1.3/vts/functional/Android.bp
new file mode 100644
index 0000000..6b28faf
--- /dev/null
+++ b/radio/config/1.3/vts/functional/Android.bp
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "VtsHalRadioConfigV1_3TargetTest",
+    defaults: ["VtsHalTargetTestDefaults"],
+    srcs: [
+        "radio_config_hidl_hal_api.cpp",
+        "radio_config_hidl_hal_test.cpp",
+        "radio_config_response.cpp",
+        "VtsHalRadioConfigV1_3TargetTest.cpp",
+    ],
+    static_libs: [
+        "RadioVtsTestUtilBase",
+        "android.hardware.radio.config@1.0",
+        "android.hardware.radio.config@1.1",
+        "android.hardware.radio.config@1.2",
+        "android.hardware.radio.config@1.3",
+    ],
+    header_libs: ["radio.util.header@1.0"],
+    test_suites: ["general-tests", "vts-core"],
+}
diff --git a/radio/config/1.3/vts/functional/VtsHalRadioConfigV1_3TargetTest.cpp b/radio/config/1.3/vts/functional/VtsHalRadioConfigV1_3TargetTest.cpp
new file mode 100644
index 0000000..3bacacf
--- /dev/null
+++ b/radio/config/1.3/vts/functional/VtsHalRadioConfigV1_3TargetTest.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+INSTANTIATE_TEST_SUITE_P(
+        PerInstance, RadioConfigHidlTest,
+        testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+                ::android::hardware::radio::config::V1_3::IRadioConfig::descriptor)),
+        android::hardware::PrintInstanceNameToString);
diff --git a/radio/config/1.3/vts/functional/radio_config_hidl_hal_api.cpp b/radio/config/1.3/vts/functional/radio_config_hidl_hal_api.cpp
new file mode 100644
index 0000000..07e9ede
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_hidl_hal_api.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+#define ASSERT_OK(ret) ASSERT_TRUE(ret.isOk())
diff --git a/radio/config/1.3/vts/functional/radio_config_hidl_hal_test.cpp b/radio/config/1.3/vts/functional/radio_config_hidl_hal_test.cpp
new file mode 100644
index 0000000..dbb4bf4
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_hidl_hal_test.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+void RadioConfigHidlTest::SetUp() {
+    radioConfig = ::android::hardware::radio::config::V1_3::IRadioConfig::getService(GetParam());
+    ASSERT_NE(nullptr, radioConfig.get());
+
+    radioConfigRsp = new (std::nothrow) RadioConfigResponse(*this);
+    ASSERT_NE(nullptr, radioConfigRsp.get());
+
+    count_ = 0;
+
+    radioConfig->setResponseFunctions(radioConfigRsp, nullptr);
+}
+
+/*
+ * Notify that the response message is received.
+ */
+void RadioConfigHidlTest::notify(int receivedSerial) {
+    std::unique_lock<std::mutex> lock(mtx_);
+    if (serial == receivedSerial) {
+        count_++;
+        cv_.notify_one();
+    }
+}
+
+/*
+ * Wait till the response message is notified or till TIMEOUT_PERIOD.
+ */
+std::cv_status RadioConfigHidlTest::wait() {
+    std::unique_lock<std::mutex> lock(mtx_);
+
+    std::cv_status status = std::cv_status::no_timeout;
+    auto now = std::chrono::system_clock::now();
+    while (count_ == 0) {
+        status = cv_.wait_until(lock, now + std::chrono::seconds(TIMEOUT_PERIOD));
+        if (status == std::cv_status::timeout) {
+            return status;
+        }
+    }
+    count_--;
+    return status;
+}
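
How a test case is expected to drive the notify()/wait() pair above (a sketch; GetRandomSerialNumber comes from the shared radio VTS utilities, and rspInfo is filled in by the response callback):

    serial = GetRandomSerialNumber();
    radioConfig->getPhoneCapability(serial);
    EXPECT_EQ(std::cv_status::no_timeout, wait());
    EXPECT_EQ(serial, radioConfigRsp->rspInfo.serial);
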
diff --git a/radio/config/1.3/vts/functional/radio_config_hidl_hal_utils.h b/radio/config/1.3/vts/functional/radio_config_hidl_hal_utils.h
new file mode 100644
index 0000000..9b78c04
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_hidl_hal_utils.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+
+#include <android/hardware/radio/config/1.3/IRadioConfig.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigIndication.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigResponse.h>
+#include <android/hardware/radio/config/1.3/types.h>
+
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
+
+#include "vts_test_util.h"
+
+using namespace ::android::hardware::radio::config::V1_1;
+using namespace ::android::hardware::radio::config::V1_2;
+using namespace ::android::hardware::radio::config::V1_3;
+
+using ::android::sp;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using ::android::hardware::radio::V1_0::RadioResponseInfo;
+using ::android::hardware::radio::V1_0::RadioResponseType;
+
+#define TIMEOUT_PERIOD 75  // seconds
+
+class RadioConfigHidlTest;
+
+/* Callback class for radio config response */
+class RadioConfigResponse : public ::android::hardware::radio::config::V1_3::IRadioConfigResponse {
+  protected:
+    RadioConfigHidlTest& parent;
+
+  public:
+    RadioResponseInfo rspInfo;
+    PhoneCapability phoneCap;
+
+    RadioConfigResponse(RadioConfigHidlTest& parent);
+    virtual ~RadioConfigResponse() = default;
+
+    /* 1.0 Api */
+    Return<void> getSimSlotsStatusResponse(
+            const RadioResponseInfo& info,
+            const hidl_vec<::android::hardware::radio::config::V1_0::SimSlotStatus>& slotStatus);
+
+    Return<void> setSimSlotsMappingResponse(const RadioResponseInfo& info);
+
+    /* 1.1 Api */
+    Return<void> getPhoneCapabilityResponse(const RadioResponseInfo& info,
+                                            const PhoneCapability& phoneCapability);
+
+    Return<void> setPreferredDataModemResponse(const RadioResponseInfo& info);
+
+    Return<void> getModemsConfigResponse(const RadioResponseInfo& info,
+                                         const ModemsConfig& mConfig);
+
+    Return<void> setModemsConfigResponse(const RadioResponseInfo& info);
+
+    /* 1.2 Api */
+    Return<void> getSimSlotsStatusResponse_1_2(const RadioResponseInfo& info,
+                                               const hidl_vec<SimSlotStatus>& slotStatus);
+};
+
+/* Callback class for radio config indication */
+class RadioConfigIndication
+    : public ::android::hardware::radio::config::V1_3::IRadioConfigIndication {
+  protected:
+    RadioConfigHidlTest& parent;
+
+  public:
+    RadioConfigIndication(RadioConfigHidlTest& parent);
+    virtual ~RadioConfigIndication() = default;
+
+    /* 1.2 Api */
+    Return<void> simSlotsStatusChanged_1_2(
+            ::android::hardware::radio::V1_0::RadioIndicationType type,
+            const hidl_vec<SimSlotStatus>& slotStatus);
+};
+
+// The main test class for Radio config HIDL.
+class RadioConfigHidlTest : public ::testing::TestWithParam<std::string> {
+  protected:
+    std::mutex mtx_;
+    std::condition_variable cv_;
+    int count_;
+
+  public:
+    virtual void SetUp() override;
+
+    /* Used as a mechanism to inform the test about data/event callback */
+    void notify(int receivedSerial);
+
+    /* Test code calls this function to wait for response */
+    std::cv_status wait();
+
+    void updateSimCardStatus();
+
+    /* Serial number for radio request */
+    int serial;
+
+    /* radio config service handle */
+    sp<::android::hardware::radio::config::V1_3::IRadioConfig> radioConfig;
+
+    /* radio config response handle */
+    sp<RadioConfigResponse> radioConfigRsp;
+};
diff --git a/radio/config/1.3/vts/functional/radio_config_response.cpp b/radio/config/1.3/vts/functional/radio_config_response.cpp
new file mode 100644
index 0000000..1ca960e
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_response.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+using ::android::hardware::radio::V1_0::RadioResponseInfo;
+
+RadioConfigResponse::RadioConfigResponse(RadioConfigHidlTest& parent) : parent(parent) {}
+
+/* 1.0 Apis */
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse(
+        const RadioResponseInfo& /* info */,
+        const ::android::hardware::hidl_vec<
+                ::android::hardware::radio::config::V1_0::SimSlotStatus>& /* slotStatus */) {
+    return Void();
+}
+
+Return<void> RadioConfigResponse::setSimSlotsMappingResponse(const RadioResponseInfo& /* info */) {
+    return Void();
+}
+
+/* 1.1 Apis */
+Return<void> RadioConfigResponse::getPhoneCapabilityResponse(
+        const RadioResponseInfo& info, const PhoneCapability& phoneCapability) {
+    rspInfo = info;
+    phoneCap = phoneCapability;
+    parent.notify(info.serial);
+    return Void();
+}
+
+Return<void> RadioConfigResponse::setPreferredDataModemResponse(
+        const RadioResponseInfo& /* info */) {
+    return Void();
+}
+
+Return<void> RadioConfigResponse::getModemsConfigResponse(const RadioResponseInfo& /* info */,
+                                                          const ModemsConfig& /* mConfig */) {
+    return Void();
+}
+
+Return<void> RadioConfigResponse::setModemsConfigResponse(const RadioResponseInfo& /* info */) {
+    return Void();
+}
+
+/* 1.2 Apis */
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse_1_2(
+        const RadioResponseInfo& /* info */,
+        const ::android::hardware::hidl_vec<SimSlotStatus>& /* slotStatus */) {
+    return Void();
+}
\ No newline at end of file
diff --git a/sensors/2.0/multihal/Android.bp b/sensors/2.0/multihal/Android.bp
index c13eaf2..811c455 100644
--- a/sensors/2.0/multihal/Android.bp
+++ b/sensors/2.0/multihal/Android.bp
@@ -43,10 +43,10 @@
     srcs: [
         "service.cpp",
         "HalProxy.cpp",
-        "ScopedWakelock.cpp",
     ],
     init_rc: ["android.hardware.sensors@2.0-service-multihal.rc"],
     vintf_fragments: ["android.hardware.sensors@2.0-multihal.xml"],
+    shared_libs: ["android.hardware.sensors@2.0-ScopedWakelock"]
 }
 
 cc_library_headers {
@@ -55,19 +55,40 @@
     export_include_dirs: ["include"],
 }
 
+cc_library_shared {
+    name: "android.hardware.sensors@2.0-ScopedWakelock",
+    defaults: [
+        "hidl_defaults",
+        "android.hardware.sensors@2.0-multihal-defaults",
+    ],
+    srcs: [
+        "ScopedWakelock.cpp",
+    ],
+    vendor_available: true,
+    export_header_lib_headers: [
+        "android.hardware.sensors@2.0-multihal.header"
+    ]
+}
+
 // The below targets should only be used for testing.
 cc_test_library {
     name: "android.hardware.sensors@2.0-HalProxy",
-    defaults: ["android.hardware.sensors@2.0-multihal-defaults"],
+    defaults: [
+        "hidl_defaults",
+        "android.hardware.sensors@2.0-multihal-defaults",
+    ],
     vendor_available: true,
     srcs: [
         "HalProxy.cpp",
-        "ScopedWakelock.cpp",
     ],
     export_header_lib_headers: [
         "android.hardware.sensors@2.0-multihal.header",
     ],
+    export_shared_lib_headers: [
+	"android.hardware.sensors@2.0-ScopedWakelock",
+    ],
     shared_libs: [
         "libutils",
+        "android.hardware.sensors@2.0-ScopedWakelock",
     ],
 }
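
Splitting ScopedWakelock into its own shared library lets sub-HALs link the wakelock code without depending on the HalProxy test library. A sketch of the sub-HAL side (SomeSubHal and mCallback are hypothetical; createScopedWakelock and postEvents are the existing IHalProxyCallback API from SubHal.h):

    // The wakelock is held across delivery of wakeup events and released when
    // the ScopedWakelock leaves scope inside postEvents.
    void SomeSubHal::deliverWakeupEvents(const std::vector<Event>& events) {
        ScopedWakelock wakelock = mCallback->createScopedWakelock(true /* lock */);
        mCallback->postEvents(events, std::move(wakelock));
    }
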
diff --git a/sensors/2.0/multihal/tests/Android.bp b/sensors/2.0/multihal/tests/Android.bp
index e7f9499..1637312 100644
--- a/sensors/2.0/multihal/tests/Android.bp
+++ b/sensors/2.0/multihal/tests/Android.bp
@@ -25,6 +25,7 @@
     shared_libs: [
         "android.hardware.sensors@1.0",
         "android.hardware.sensors@2.0",
+	"android.hardware.sensors@2.0-ScopedWakelock",
         "libcutils",
         "libfmq",
         "libhardware",
@@ -83,6 +84,7 @@
     shared_libs: [
         "android.hardware.sensors@1.0",
         "android.hardware.sensors@2.0",
+        "android.hardware.sensors@2.0-ScopedWakelock",
         "libbase",
         "libcutils",
         "libfmq",
diff --git a/soundtrigger/2.1/Android.bp b/soundtrigger/2.1/Android.bp
index 68e425b..30173cb 100644
--- a/soundtrigger/2.1/Android.bp
+++ b/soundtrigger/2.1/Android.bp
@@ -15,5 +15,5 @@
         "android.hardware.soundtrigger@2.0",
         "android.hidl.base@1.0",
     ],
-    gen_java: false,
+    gen_java: true,
 }
diff --git a/soundtrigger/2.2/Android.bp b/soundtrigger/2.2/Android.bp
index 43898c7..7556aa4 100644
--- a/soundtrigger/2.2/Android.bp
+++ b/soundtrigger/2.2/Android.bp
@@ -15,5 +15,5 @@
         "android.hardware.soundtrigger@2.1",
         "android.hidl.base@1.0",
     ],
-    gen_java: false,
+    gen_java: true,
 }
diff --git a/tests/extension/vibrator/aidl/client/test-cpp-client.cpp b/tests/extension/vibrator/aidl/client/test-cpp-client.cpp
index f6f5537..015a345 100644
--- a/tests/extension/vibrator/aidl/client/test-cpp-client.cpp
+++ b/tests/extension/vibrator/aidl/client/test-cpp-client.cpp
@@ -20,9 +20,9 @@
 #include <binder/IServiceManager.h>
 #include <gtest/gtest.h>
 
+using android::checked_interface_cast;
 using android::IBinder;
 using android::IInterface;
-using android::interface_cast;
 using android::OK;
 using android::sp;
 using android::waitForVintfService;
@@ -44,7 +44,7 @@
     // getting the extension
     sp<IBinder> ext;
     ASSERT_EQ(OK, IInterface::asBinder(vib)->getExtension(&ext));
-    sp<ICustomVibrator> cvib = interface_cast<ICustomVibrator>(ext);
+    sp<ICustomVibrator> cvib = checked_interface_cast<ICustomVibrator>(ext);
     ASSERT_NE(nullptr, cvib.get());
 
     // calling extension method
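
The switch to checked_interface_cast matters because the extension binder comes from another process: interface_cast wraps any IBinder unconditionally, while checked_interface_cast (as understood from libbinder's IInterface.h) compares the interface descriptor first and returns nullptr on mismatch. A sketch with a hypothetical unrelated binder:

    // someUnrelatedBinder does not report ICustomVibrator's descriptor, so
    // the checked cast yields nullptr instead of a misbehaving proxy.
    sp<ICustomVibrator> bogus = checked_interface_cast<ICustomVibrator>(someUnrelatedBinder);
    ASSERT_EQ(nullptr, bogus.get());
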
diff --git a/tests/memory/1.0/Android.bp b/tests/memory/1.0/Android.bp
index 29f6be7..6612e31 100644
--- a/tests/memory/1.0/Android.bp
+++ b/tests/memory/1.0/Android.bp
@@ -11,5 +11,5 @@
         "android.hidl.memory.block@1.0",
         "android.hidl.memory.token@1.0",
     ],
-    gen_java: false,
+    gen_java: true,
 }
diff --git a/tests/memory/2.0/.hidl_for_test b/tests/memory/2.0/.hidl_for_test
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/memory/2.0/.hidl_for_test
diff --git a/tests/memory/2.0/Android.bp b/tests/memory/2.0/Android.bp
new file mode 100644
index 0000000..d24bd21
--- /dev/null
+++ b/tests/memory/2.0/Android.bp
@@ -0,0 +1,14 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.tests.memory@2.0",
+    root: "android.hardware",
+    srcs: [
+        "types.hal",
+        "IMemoryInterface.hal",
+    ],
+    interfaces: [
+        "android.hidl.base@1.0",
+    ],
+    gen_java: true,
+}
diff --git a/tests/memory/2.0/IMemoryInterface.hal b/tests/memory/2.0/IMemoryInterface.hal
new file mode 100644
index 0000000..2c824bf
--- /dev/null
+++ b/tests/memory/2.0/IMemoryInterface.hal
@@ -0,0 +1,12 @@
+package android.hardware.tests.memory@2.0;
+
+interface IMemoryInterface {
+    // Flips all the bits in the given memory buffer.
+    bitwiseNot(memory mem);
+    // Returns a read-only buffer of size 8, containing the bytes 0..7.
+    getTestMem() generates(memory mem);
+    // Given two memory regions of the same size, returns two memory fields of
+    // equal size, the first containing the byte-wise sum and the second the
+    // byte-wise difference.
+    getSumDiff(TwoMemory in) generates(TwoMemory out);
+};
diff --git a/tests/memory/2.0/types.hal b/tests/memory/2.0/types.hal
new file mode 100644
index 0000000..9ec357b
--- /dev/null
+++ b/tests/memory/2.0/types.hal
@@ -0,0 +1,6 @@
+package android.hardware.tests.memory@2.0;
+
+struct TwoMemory {
+    memory mem1;
+    memory mem2;
+};
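
A hypothetical client sketch for the new test interface (mapMemory and IMemory come from libhidlmemory and android.hidl.memory@1.0; allocating the two equal-sized input regions memA and memB is elided):

    TwoMemory in;
    in.mem1 = memA;
    in.mem2 = memB;
    memoryInterface->getSumDiff(in, [&](const TwoMemory& out) {
        sp<IMemory> sum = mapMemory(out.mem1);   // byte-wise sums
        sp<IMemory> diff = mapMemory(out.mem2);  // byte-wise differences
    });
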
diff --git a/vibrator/aidl/Android.bp b/vibrator/aidl/Android.bp
index cd5439f..e26041a 100644
--- a/vibrator/aidl/Android.bp
+++ b/vibrator/aidl/Android.bp
@@ -5,5 +5,11 @@
         "android/hardware/vibrator/*.aidl",
     ],
     stability: "vintf",
+    backend: {
+        ndk: {
+            vndk: {
+                enabled: true,
+            },
+        },
+    },
 }
-
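
Enabling the VNDK for the NDK backend lets vendor code link the stable vibrator interface directly. A hypothetical vendor-side client (the instance name follows the default AIDL convention; AServiceManager_getService is the libbinder_ndk entry point):

    #include <aidl/android/hardware/vibrator/IVibrator.h>
    #include <android/binder_manager.h>

    using aidl::android::hardware::vibrator::IVibrator;

    std::shared_ptr<IVibrator> vib = IVibrator::fromBinder(ndk::SpAIBinder(
            AServiceManager_getService("android.hardware.vibrator.IVibrator/default")));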