Merge "Data Injection Support for Default Sensors HAL 2.0"
diff --git a/audio/common/all-versions/default/service/Android.mk b/audio/common/all-versions/default/service/Android.mk
index 84de75e..e6ae03e 100644
--- a/audio/common/all-versions/default/service/Android.mk
+++ b/audio/common/all-versions/default/service/Android.mk
@@ -57,7 +57,4 @@
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 endif
 
-# b/117506164
-LOCAL_SANITIZE := never
-
 include $(BUILD_EXECUTABLE)
diff --git a/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc b/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc
index a634441..b615227 100644
--- a/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc
+++ b/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc
@@ -5,11 +5,3 @@
     group bluetooth
     writepid /dev/stune/foreground/tasks
 
-on property:vts.native_server.on=1 && property:ro.build.type=userdebug
-    stop vendor.bluetooth-1-0
-on property:vts.native_server.on=1 && property:ro.build.type=eng
-    stop vendor.bluetooth-1-0
-on property:vts.native_server.on=0 && property:ro.build.type=userdebug
-    start vendor.bluetooth-1-0
-on property:vts.native_server.on=0 && property:ro.build.type=eng
-    start vendor.bluetooth-1-0
diff --git a/bluetooth/1.0/default/vendor_interface.cc b/bluetooth/1.0/default/vendor_interface.cc
index e5f02f3..d56e344 100644
--- a/bluetooth/1.0/default/vendor_interface.cc
+++ b/bluetooth/1.0/default/vendor_interface.cc
@@ -294,6 +294,7 @@
     lib_interface_->op(BT_VND_OP_POWER_CTRL, &power_state);
 
     lib_interface_->cleanup();
+    lib_interface_ = nullptr;
   }
 
   if (lib_handle_ != nullptr) {
diff --git a/bluetooth/1.0/default/vendor_interface.h b/bluetooth/1.0/default/vendor_interface.h
index 36f4e1b..1d69040 100644
--- a/bluetooth/1.0/default/vendor_interface.h
+++ b/bluetooth/1.0/default/vendor_interface.h
@@ -58,15 +58,15 @@
 
   void HandleIncomingEvent(const hidl_vec<uint8_t>& hci_packet);
 
-  void* lib_handle_;
-  bt_vendor_interface_t* lib_interface_;
+  void* lib_handle_ = nullptr;
+  bt_vendor_interface_t* lib_interface_ = nullptr;
   async::AsyncFdWatcher fd_watcher_;
   InitializeCompleteCallback initialize_complete_cb_;
-  hci::HciProtocol* hci_;
+  hci::HciProtocol* hci_ = nullptr;
 
   PacketReadCallback event_cb_;
 
-  FirmwareStartupTimer* firmware_startup_timer_;
+  FirmwareStartupTimer* firmware_startup_timer_ = nullptr;
 };
 
 }  // namespace implementation
diff --git a/bluetooth/1.0/vts/OWNERS b/bluetooth/1.0/vts/OWNERS
new file mode 100644
index 0000000..58d3a66
--- /dev/null
+++ b/bluetooth/1.0/vts/OWNERS
@@ -0,0 +1,8 @@
+zachoverflow@google.com
+siyuanh@google.com
+mylesgw@google.com
+jpawlowski@google.com
+apanicke@google.com
+stng@google.com
+hsz@google.com
+
diff --git a/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp b/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp
index 88d4234..90fbb3f 100644
--- a/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp
+++ b/bluetooth/1.0/vts/functional/VtsHalBluetoothV1_0TargetTest.cpp
@@ -26,7 +26,10 @@
 #include <VtsHalHidlTargetCallbackBase.h>
 #include <VtsHalHidlTargetTestBase.h>
 #include <VtsHalHidlTargetTestEnvBase.h>
+
+#include <chrono>
 #include <queue>
+#include <thread>
 
 using ::android::sp;
 using ::android::hardware::hidl_death_recipient;
@@ -46,6 +49,7 @@
 #define WAIT_FOR_HCI_EVENT_TIMEOUT std::chrono::milliseconds(2000)
 #define WAIT_FOR_SCO_DATA_TIMEOUT std::chrono::milliseconds(1000)
 #define WAIT_FOR_ACL_DATA_TIMEOUT std::chrono::milliseconds(1000)
+#define INTERFACE_CLOSE_DELAY_MS std::chrono::milliseconds(200)
 
 #define COMMAND_HCI_SHOULD_BE_UNKNOWN \
   { 0xff, 0x3B, 0x08, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }
@@ -197,8 +201,10 @@
   }
 
   virtual void TearDown() override {
+    ALOGI("TearDown");
     // Should not be checked in production code
     ASSERT_TRUE(bluetooth->close().isOk());
+    std::this_thread::sleep_for(INTERFACE_CLOSE_DELAY_MS);
     handle_no_ops();
     EXPECT_EQ(static_cast<size_t>(0), event_queue.size());
     EXPECT_EQ(static_cast<size_t>(0), sco_queue.size());
@@ -222,9 +228,10 @@
 
   class BluetoothHciDeathRecipient : public hidl_death_recipient {
    public:
-    virtual void serviceDied(
+    void serviceDied(
         uint64_t /*cookie*/,
-        const android::wp<::android::hidl::base::V1_0::IBase>& /*who*/) {
+        const android::wp<::android::hidl::base::V1_0::IBase>& /*who*/)
+        override {
       FAIL();
     }
   };
@@ -308,7 +315,7 @@
     if (event_is_no_op) {
       event_queue.pop();
     } else {
-      return;
+      break;
     }
   }
 }
diff --git a/bluetooth/a2dp/1.0/vts/OWNERS b/bluetooth/a2dp/1.0/vts/OWNERS
new file mode 100644
index 0000000..58d3a66
--- /dev/null
+++ b/bluetooth/a2dp/1.0/vts/OWNERS
@@ -0,0 +1,8 @@
+zachoverflow@google.com
+siyuanh@google.com
+mylesgw@google.com
+jpawlowski@google.com
+apanicke@google.com
+stng@google.com
+hsz@google.com
+
diff --git a/camera/metadata/3.2/types.hal b/camera/metadata/3.2/types.hal
index 67b4e44..cef0397 100644
--- a/camera/metadata/3.2/types.hal
+++ b/camera/metadata/3.2/types.hal
@@ -1396,7 +1396,8 @@
      *
      * <p>The arrangement of color filters on sensor;
      * represents the colors in the top-left 2x2 section of
-     * the sensor, in reading order.</p>
+     * the sensor, in reading order, for a Bayer camera, or the
+     * light spectrum it captures for MONOCHROME camera.</p>
      */
     ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
 
diff --git a/current.txt b/current.txt
index 8b10648..e19fb63 100644
--- a/current.txt
+++ b/current.txt
@@ -388,8 +388,10 @@
 2a55e224aa9bc62c0387cd85ad3c97e33f0c33a4e1489cbae86b2523e6f9df35 android.hardware.camera.device@3.2::ICameraDevice
 f61b616732d8f374e030f90575d7eba3ecc99d209a05b945949ba892bcb81e1d android.hardware.camera.device@3.2::ICameraDeviceSession
 684702a60deef03a1e8093961dc0a18c555c857ad5a77ba7340b0635ae01eb70 android.hardware.camera.device@3.4::ICameraDeviceSession
+291638a1b6d4e63283e9e722ab5049d9351717ffa2b66162124f84d1aa7c2835 android.hardware.camera.metadata@3.2::types
 dd2436f251a90f3e5e7ed773b1aeae21e381b00ae26b10ebe3a1001c894e5980 android.hardware.camera.metadata@3.3::types
 da33234403ff5d60f3473711917b9948e6484a4260b5247acdafb111193a9de2 android.hardware.configstore@1.0::ISurfaceFlingerConfigs
+21165b8e30c4b2d52980e4728f661420adc16e38bbe73476c06b2085be908f4c android.hardware.gnss@1.0::IGnssCallback
 d702fb01dc2a0733aa820b7eb65435ee3334f75632ef880bafd2fb8803a20a58 android.hardware.gnss@1.0::IGnssMeasurementCallback
 b7ecf29927055ec422ec44bf776223f07d79ad9f92ccf9becf167e62c2607e7a android.hardware.keymaster@4.0::IKeymasterDevice
 574e8f1499436fb4075894dcae0b36682427956ecb114f17f1fe22d116a83c6b android.hardware.neuralnetworks@1.0::IPreparedModel
diff --git a/drm/1.2/Android.bp b/drm/1.2/Android.bp
new file mode 100644
index 0000000..66a1bd8
--- /dev/null
+++ b/drm/1.2/Android.bp
@@ -0,0 +1,26 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.drm@1.2",
+    root: "android.hardware",
+    vndk: {
+        enabled: true,
+    },
+    srcs: [
+        "types.hal",
+        "ICryptoFactory.hal",
+        "IDrmFactory.hal",
+        "IDrmPlugin.hal",
+    ],
+    interfaces: [
+        "android.hardware.drm@1.0",
+        "android.hardware.drm@1.1",
+        "android.hidl.base@1.0",
+    ],
+    types: [
+        "KeySetId",
+        "OfflineLicenseState",
+    ],
+    gen_java: false,
+}
+
diff --git a/drm/1.2/ICryptoFactory.hal b/drm/1.2/ICryptoFactory.hal
new file mode 100644
index 0000000..c4a9b4b
--- /dev/null
+++ b/drm/1.2/ICryptoFactory.hal
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.hardware.drm@1.2;
+
+import @1.1::ICryptoFactory;
+
+/**
+ * ICryptoFactory is the main entry point for interacting with a vendor's
+ * crypto HAL to create crypto plugins. Crypto plugins create crypto sessions
+ * which are used by a codec to decrypt protected video content.
+ *
+ * The 1.2 factory must always create 1.2 ICryptoPlugin interfaces, which are
+ * returned via the 1.0 createPlugin method.
+ *
+ * To use 1.2 features the caller must cast the returned interface to a
+ * 1.2 HAL, using V1_2::ICryptoPlugin::castFrom().
+ *
+ * The ICryptoFactory hal is required because all top-level interfaces
+ * have to be updated in a minor uprev.
+ */
+interface ICryptoFactory extends @1.1::ICryptoFactory {
+};
diff --git a/drm/1.2/IDrmFactory.hal b/drm/1.2/IDrmFactory.hal
new file mode 100644
index 0000000..c94e4bb
--- /dev/null
+++ b/drm/1.2/IDrmFactory.hal
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.hardware.drm@1.2;
+
+import @1.1::IDrmFactory;
+import @1.1::IDrmPlugin;
+
+/**
+ * IDrmFactory is the main entry point for interacting with a vendor's
+ * drm HAL to create drm plugin instances. A drm plugin instance
+ * creates drm sessions which are used to obtain keys for a crypto
+ * session so it can decrypt protected video content.
+ *
+ * The 1.2 factory must always create 1.2 IDrmPlugin interfaces, which are
+ * returned via the 1.0 createPlugin method.
+ *
+ * To use 1.2 features the caller must cast the returned interface to a
+ * 1.2 HAL, using V1_2::IDrmPlugin::castFrom().
+ *
+ * The IDrmFactory hal is required because all top-level interfaces
+ * have to be updated in a minor uprev.
+ */
+
+interface IDrmFactory extends @1.1::IDrmFactory {
+};
diff --git a/drm/1.2/IDrmPlugin.hal b/drm/1.2/IDrmPlugin.hal
new file mode 100644
index 0000000..88338d6
--- /dev/null
+++ b/drm/1.2/IDrmPlugin.hal
@@ -0,0 +1,94 @@
+/**
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.hardware.drm@1.2;
+
+import @1.1::IDrmPlugin;
+import @1.0::Status;
+
+/**
+ * IDrmPlugin is used to interact with a specific drm plugin that was created by
+ * IDrm::createPlugin. A drm plugin provides methods for obtaining drm keys that
+ * may be used by a codec to decrypt protected video content.
+ */
+interface IDrmPlugin extends @1.1::IDrmPlugin {
+
+    /**
+     * The keys in an offline license allow protected content to be
+     * played even if the device is not connected to a network.
+     * Offline licenses are stored on the device after a key
+     * request/response exchange when the key request KeyType is
+     * OFFLINE. Normally each app is responsible for keeping track of
+     * the KeySetIds it has created. In some situations however, it
+     * may be necessary to request the list of stored offline license
+     * KeySetIds. If an app loses the KeySetId for any stored licenses
+     * that it created, for example, it must be able to recover the
+     * stored KeySetIds so those licenses can be removed when they
+     * expire or when the app is uninstalled.
+     * <p>
+     * This method returns a list of the KeySetIds for all offline
+     * licenses. The offline license KeySetId may be used to query
+     * the status of an offline license or remove it.
+     *
+     * @return status the status of the call. May be OK or
+     *     ERROR_DRM_INVALID_STATE if the HAL is in a state where the
+     *     KeySetIds can't be returned.
+     * @return keySetIds a list of offline license keySetIds. If there are no offline
+     *     licenses, the list must be empty and OK must be returned as the
+     *     status.
+     */
+    getOfflineLicenseKeySetIds() generates (Status status, vec<KeySetId> keySetIds);
+
+    /**
+     * Normally offline licenses are released using a key
+     * request/response exchange using getKeyRequest where the KeyType
+     * is RELEASE, followed by provideKeyResponse. This allows the
+     * server to cryptographically confirm that the license has been
+     * removed and then adjust the count of offline licenses allocated
+     * to the device.
+     * <p>
+     * In some exceptional situations it may be necessary to directly
+     * remove offline licenses without notifying the server, which may
+     * be performed using this method.
+     *
+     * @param keySetId the id of the offline license to remove
+     * @return status the status of the call. May be one of OK on
+     *     success, BAD_VALUE if the license is not found or
+     *     ERROR_DRM_INVALID_STATE if the HAL is in a state where the
+     *     offline license can't be removed.
+     */
+    removeOfflineLicense(KeySetId keySetId) generates (Status status);
+
+    /**
+     * Request the state of an offline license. An offline license may
+     * be usable or inactive. The keys in a usable offline license are
+     * available for decryption. When the offline license state is
+     * inactive, the keys have been marked for release using
+     * getKeyRequest with KeyType RELEASE but the key response has not
+     * been received. The keys in an inactive offline license are not
+     * usable for decryption.
+     *
+     * @param keySetId the id of the offline license
+     * @return status the status of the call. May be one of OK on
+     *     success, BAD_VALUE if the license is not found or
+     *     ERROR_DRM_INVALID_STATE if the HAL is in a state where the
+     *     offline license state can't be queried.
+     * @return state the offline license state, one of USABLE or INACTIVE.
+     *     If the return status is not OK then state must be set to
+     *     UNKNOWN.
+     */
+    getOfflineLicenseState(KeySetId keySetId) generates (Status status,
+            OfflineLicenseState state);
+};
diff --git a/drm/1.2/types.hal b/drm/1.2/types.hal
new file mode 100644
index 0000000..8770c79
--- /dev/null
+++ b/drm/1.2/types.hal
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.drm@1.2;
+
+enum OfflineLicenseState : uint32_t {
+    /**
+     * Offline license state is unknown
+     */
+    UNKNOWN,
+
+    /**
+     * Offline license state is usable, the keys may be used for decryption.
+     */
+    USABLE,
+
+    /**
+     * Offline license state is inactive, the keys have been marked for
+     * release using {@link #getKeyRequest} with KEY_TYPE_RELEASE but the
+     * key response has not been received.
+     */
+    INACTIVE
+};
+
+/**
+ * KeySetId is an identifier that references a set of keys in an
+ * offline license. The keySetId is created by the HAL implementation
+ * and returned from provideKeyResponse and getOfflineLicenseKeySetIds. The
+ * framework passes KeySetId back to the HAL when referring to the key
+ * set in methods that take a KeySetId as an input parameter.
+ */
+typedef vec<uint8_t> KeySetId;
diff --git a/gnss/1.0/IGnssCallback.hal b/gnss/1.0/IGnssCallback.hal
index 7fb38c5..d62676f 100644
--- a/gnss/1.0/IGnssCallback.hal
+++ b/gnss/1.0/IGnssCallback.hal
@@ -115,9 +115,9 @@
         /**
          * Carrier frequency of the signal tracked, for example it can be the
          * GPS central frequency for L1 = 1575.45 MHz, or L2 = 1227.60 MHz, L5 =
-         * 1176.45 MHz, varying GLO channels, etc. If the field is not set, it
-         * is the primary common use central frequency, e.g. L1 = 1575.45 MHz
-         * for GPS.
+         * 1176.45 MHz, varying GLO channels, etc. If the field is zero, it is
+         * the primary common use central frequency, e.g. L1 = 1575.45 MHz for
+         * GPS.
          *
          * For an L1, L5 receiver tracking a satellite on L1 and L5 at the same
          * time, two GnssSvInfo structs must be reported for this same
@@ -125,8 +125,7 @@
          * to L1 must be filled, and in the other all of the values related to
          * L5 must be filled.
          *
-         * If the data is available, gnssClockFlags must contain
-         * HAS_CARRIER_FREQUENCY.
+         * If the data is available, svFlag must contain HAS_CARRIER_FREQUENCY.
          */
         float carrierFrequencyHz;
 
diff --git a/keymaster/4.0/support/authorization_set.cpp b/keymaster/4.0/support/authorization_set.cpp
index afbcdac..d6b50f5 100644
--- a/keymaster/4.0/support/authorization_set.cpp
+++ b/keymaster/4.0/support/authorization_set.cpp
@@ -18,6 +18,8 @@
 
 #include <assert.h>
 
+#include <android-base/logging.h>
+
 namespace android {
 namespace hardware {
 namespace keymaster {
@@ -97,10 +99,10 @@
         if (prev->tag == Tag::INVALID) continue;
 
         if (!keyParamEqual(*prev, *curr)) {
-            result.emplace_back(std::move(*prev));
+            result.push_back(std::move(*prev));
         }
     }
-    result.emplace_back(std::move(*prev));
+    result.push_back(std::move(*prev));
 
     std::swap(data_, result);
 }
@@ -127,6 +129,16 @@
     }
 }
 
+void AuthorizationSet::Filter(std::function<bool(const KeyParameter&)> doKeep) {
+    std::vector<KeyParameter> result;
+    for (auto& param : data_) {
+        if (doKeep(param)) {
+            result.push_back(std::move(param));
+        }
+    }
+    std::swap(data_, result);
+}
+
 KeyParameter& AuthorizationSet::operator[](int at) {
     return data_[at];
 }
@@ -191,6 +203,7 @@
 struct OutStreams {
     std::ostream& indirect;
     std::ostream& elements;
+    size_t skipped;
 };
 
 OutStreams& serializeParamValue(OutStreams& out, const hidl_vec<uint8_t>& blob) {
@@ -229,6 +242,7 @@
 
 OutStreams& serialize(TAG_INVALID_t&&, OutStreams& out, const KeyParameter&) {
     // skip invalid entries.
+    ++out.skipped;
     return out;
 }
 template <typename T>
@@ -248,7 +262,12 @@
 
 template <>
 struct choose_serializer<> {
-    static OutStreams& serialize(OutStreams& out, const KeyParameter&) { return out; }
+    static OutStreams& serialize(OutStreams& out, const KeyParameter& param) {
+        LOG(WARNING) << "Trying to serialize unknown tag " << unsigned(param.tag)
+                     << ". Did you forget to add it to all_tags_t?";
+        ++out.skipped;
+        return out;
+    }
 };
 
 template <TagType tag_type, Tag tag, typename... Tail>
@@ -269,7 +288,7 @@
 std::ostream& serialize(std::ostream& out, const std::vector<KeyParameter>& params) {
     std::stringstream indirect;
     std::stringstream elements;
-    OutStreams streams = {indirect, elements};
+    OutStreams streams = {indirect, elements, 0};
     for (const auto& param : params) {
         serialize(streams, param);
     }
@@ -289,7 +308,7 @@
         return out;
     }
     uint32_t elements_size = pos;
-    uint32_t element_count = params.size();
+    uint32_t element_count = params.size() - streams.skipped;
 
     out.write(reinterpret_cast<const char*>(&indirect_size), sizeof(uint32_t));
 
@@ -310,6 +329,7 @@
 struct InStreams {
     std::istream& indirect;
     std::istream& elements;
+    size_t invalids;
 };
 
 InStreams& deserializeParamValue(InStreams& in, hidl_vec<uint8_t>* blob) {
@@ -331,6 +351,7 @@
 
 InStreams& deserialize(TAG_INVALID_t&&, InStreams& in, KeyParameter*) {
     // There should be no invalid KeyParameters, but if there are, handle them as zero-sized.
+    ++in.invalids;
     return in;
 }
 
@@ -398,13 +419,28 @@
     // TODO write one-shot stream buffer to avoid copying here
     std::stringstream indirect(indirect_buffer);
     std::stringstream elements(elements_buffer);
-    InStreams streams = {indirect, elements};
+    InStreams streams = {indirect, elements, 0};
 
     params->resize(element_count);
 
     for (uint32_t i = 0; i < element_count; ++i) {
         deserialize(streams, &(*params)[i]);
     }
+
+    /*
+     * There are legacy blobs which have invalid tags in them due to a bug during serialization.
+     * This makes sure that invalid tags are filtered from the result before it is returned.
+     */
+    if (streams.invalids > 0) {
+        std::vector<KeyParameter> filtered(element_count - streams.invalids);
+        auto ifiltered = filtered.begin();
+        for (auto& p : *params) {
+            if (p.tag != Tag::INVALID) {
+                *ifiltered++ = std::move(p);
+            }
+        }
+        *params = std::move(filtered);
+    }
     return in;
 }
 
diff --git a/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h b/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h
index 83b1d69..458053a 100644
--- a/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h
+++ b/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h
@@ -20,6 +20,9 @@
 
 #include <android/hardware/keymaster/4.0/IKeymasterDevice.h>
 
+#include <memory>
+#include <vector>
+
 namespace android {
 namespace hardware {
 namespace keymaster {
diff --git a/keymaster/4.0/support/include/keymasterV4_0/authorization_set.h b/keymaster/4.0/support/include/keymasterV4_0/authorization_set.h
index 193e4ea..ac96c86 100644
--- a/keymaster/4.0/support/include/keymasterV4_0/authorization_set.h
+++ b/keymaster/4.0/support/include/keymasterV4_0/authorization_set.h
@@ -142,6 +142,11 @@
     std::vector<KeyParameter>::const_iterator end() const { return data_.end(); }
 
     /**
+     * Modifies this Authorization set such that it only keeps the entries for which doKeep
+     * returns true.
+     */
+    void Filter(std::function<bool(const KeyParameter&)> doKeep);
+    /**
      * Returns the nth element of the set.
      * Like for std::vector::operator[] there is no range check performed. Use of out of range
      * indices is undefined.
@@ -209,7 +214,7 @@
         }
     }
 
-    hidl_vec<KeyParameter> hidl_data() const {
+    const hidl_vec<KeyParameter> hidl_data() const {
         hidl_vec<KeyParameter> result;
         result.setToExternal(const_cast<KeyParameter*>(data()), size());
         return result;
diff --git a/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h b/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h
index 9e7d252..61c444c 100644
--- a/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h
+++ b/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h
@@ -122,6 +122,7 @@
 DECLARE_TYPED_TAG(CREATION_DATETIME);
 DECLARE_TYPED_TAG(DIGEST);
 DECLARE_TYPED_TAG(EC_CURVE);
+DECLARE_TYPED_TAG(HARDWARE_TYPE);
 DECLARE_TYPED_TAG(INCLUDE_UNIQUE_ID);
 DECLARE_TYPED_TAG(INVALID);
 DECLARE_TYPED_TAG(KEY_SIZE);
@@ -162,12 +163,13 @@
              TAG_USER_SECURE_ID_t, TAG_NO_AUTH_REQUIRED_t, TAG_AUTH_TIMEOUT_t,
              TAG_ALLOW_WHILE_ON_BODY_t, TAG_UNLOCKED_DEVICE_REQUIRED_t, TAG_APPLICATION_ID_t,
              TAG_APPLICATION_DATA_t, TAG_CREATION_DATETIME_t, TAG_ROLLBACK_RESISTANCE_t,
-             TAG_ROOT_OF_TRUST_t, TAG_ASSOCIATED_DATA_t, TAG_NONCE_t, TAG_BOOTLOADER_ONLY_t,
-             TAG_OS_VERSION_t, TAG_OS_PATCHLEVEL_t, TAG_UNIQUE_ID_t, TAG_ATTESTATION_CHALLENGE_t,
-             TAG_ATTESTATION_APPLICATION_ID_t, TAG_RESET_SINCE_ID_ROTATION_t, TAG_PURPOSE_t,
-             TAG_ALGORITHM_t, TAG_BLOCK_MODE_t, TAG_DIGEST_t, TAG_PADDING_t,
-             TAG_BLOB_USAGE_REQUIREMENTS_t, TAG_ORIGIN_t, TAG_USER_AUTH_TYPE_t, TAG_EC_CURVE_t,
-             TAG_BOOT_PATCHLEVEL_t, TAG_VENDOR_PATCHLEVEL_t, TAG_TRUSTED_USER_PRESENCE_REQUIRED_t>;
+             TAG_HARDWARE_TYPE_t, TAG_ROOT_OF_TRUST_t, TAG_ASSOCIATED_DATA_t, TAG_NONCE_t,
+             TAG_BOOTLOADER_ONLY_t, TAG_OS_VERSION_t, TAG_OS_PATCHLEVEL_t, TAG_UNIQUE_ID_t,
+             TAG_ATTESTATION_CHALLENGE_t, TAG_ATTESTATION_APPLICATION_ID_t,
+             TAG_RESET_SINCE_ID_ROTATION_t, TAG_PURPOSE_t, TAG_ALGORITHM_t, TAG_BLOCK_MODE_t,
+             TAG_DIGEST_t, TAG_PADDING_t, TAG_BLOB_USAGE_REQUIREMENTS_t, TAG_ORIGIN_t,
+             TAG_USER_AUTH_TYPE_t, TAG_EC_CURVE_t, TAG_BOOT_PATCHLEVEL_t, TAG_VENDOR_PATCHLEVEL_t,
+             TAG_TRUSTED_CONFIRMATION_REQUIRED_t, TAG_TRUSTED_USER_PRESENCE_REQUIRED_t>;
 
 template <typename TypedTagType>
 struct TypedTag2ValueType;
@@ -220,6 +222,7 @@
 MAKE_TAG_ENUM_VALUE_ACCESSOR(TAG_PADDING, f.paddingMode)
 MAKE_TAG_ENUM_VALUE_ACCESSOR(TAG_PURPOSE, f.purpose)
 MAKE_TAG_ENUM_VALUE_ACCESSOR(TAG_USER_AUTH_TYPE, f.hardwareAuthenticatorType)
+MAKE_TAG_ENUM_VALUE_ACCESSOR(TAG_HARDWARE_TYPE, f.hardwareType)
 
 template <TagType tag_type, Tag tag, typename ValueT>
 inline KeyParameter makeKeyParameter(TypedTag<tag_type, tag> ttag, ValueT&& value) {
diff --git a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
index 784ae30..a9c6f6c 100644
--- a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -181,7 +181,35 @@
     return d2i_X509(nullptr, &p, blob.size());
 }
 
-bool verify_chain(const hidl_vec<hidl_vec<uint8_t>>& chain) {
+bool verify_chain(const hidl_vec<hidl_vec<uint8_t>>& chain, const std::string& msg,
+                  const std::string& signature) {
+    {
+        EVP_MD_CTX md_ctx_verify;
+        X509_Ptr signing_cert(parse_cert_blob(chain[0]));
+        EVP_PKEY_Ptr signing_pubkey(X509_get_pubkey(signing_cert.get()));
+        EXPECT_TRUE(signing_pubkey);
+        ERR_print_errors_cb(
+            [](const char* str, size_t len, void* ctx) -> int {
+                (void)ctx;
+                std::cerr << std::string(str, len) << std::endl;
+                return 1;
+            },
+            nullptr);
+
+        EVP_MD_CTX_init(&md_ctx_verify);
+
+        bool result = false;
+        EXPECT_TRUE((result = EVP_DigestVerifyInit(&md_ctx_verify, NULL, EVP_sha256(), NULL,
+                                                   signing_pubkey.get())));
+        EXPECT_TRUE(
+            (result = result && EVP_DigestVerifyUpdate(&md_ctx_verify, msg.c_str(), msg.size())));
+        EXPECT_TRUE((result = result && EVP_DigestVerifyFinal(
+                                            &md_ctx_verify,
+                                            reinterpret_cast<const uint8_t*>(signature.c_str()),
+                                            signature.size())));
+        EVP_MD_CTX_cleanup(&md_ctx_verify);
+        if (!result) return false;
+    }
     for (size_t i = 0; i < chain.size(); ++i) {
         X509_Ptr key_cert(parse_cert_blob(chain[i]));
         X509_Ptr signing_cert;
@@ -3833,8 +3861,8 @@
     ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
                                              .Authorization(TAG_NO_AUTH_REQUIRED)
                                              .RsaSigningKey(2048, 65537)
-                                             .Digest(Digest::NONE)
-                                             .Padding(PaddingMode::NONE)
+                                             .Digest(Digest::SHA_2_256)
+                                             .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)
                                              .Authorization(TAG_INCLUDE_UNIQUE_ID)));
 
     hidl_vec<hidl_vec<uint8_t>> cert_chain;
@@ -3844,7 +3872,13 @@
                             .Authorization(TAG_ATTESTATION_APPLICATION_ID, HidlBuf("foo")),
                         &cert_chain));
     EXPECT_GE(cert_chain.size(), 2U);
-    EXPECT_TRUE(verify_chain(cert_chain));
+
+    string message = "12345678901234567890123456789012";
+    string signature = SignMessage(message, AuthorizationSetBuilder()
+                                                .Digest(Digest::SHA_2_256)
+                                                .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN));
+
+    EXPECT_TRUE(verify_chain(cert_chain, message, signature));
     EXPECT_TRUE(verify_attestation_record("challenge", "foo",                     //
                                           key_characteristics_.softwareEnforced,  //
                                           key_characteristics_.hardwareEnforced,  //
@@ -3890,7 +3924,11 @@
                             .Authorization(TAG_ATTESTATION_APPLICATION_ID, HidlBuf("foo")),
                         &cert_chain));
     EXPECT_GE(cert_chain.size(), 2U);
-    EXPECT_TRUE(verify_chain(cert_chain));
+
+    string message(1024, 'a');
+    string signature = SignMessage(message, AuthorizationSetBuilder().Digest(Digest::SHA_2_256));
+
+    EXPECT_TRUE(verify_chain(cert_chain, message, signature));
 
     EXPECT_TRUE(verify_attestation_record("challenge", "foo",                     //
                                           key_characteristics_.softwareEnforced,  //
diff --git a/media/c2/1.0/Android.bp b/media/c2/1.0/Android.bp
index 3c99b6a..28829d2 100644
--- a/media/c2/1.0/Android.bp
+++ b/media/c2/1.0/Android.bp
@@ -17,11 +17,9 @@
         "IInputSurfaceConnection.hal",
     ],
     interfaces: [
-        "android.hardware.graphics.bufferqueue@2.0",
+        "android.hardware.graphics.bufferqueue@1.0",
         "android.hardware.graphics.common@1.0",
-        "android.hardware.graphics.common@1.1",
-        "android.hardware.graphics.common@1.2",
-        "android.hardware.media.bufferpool@2.0",
+        "android.hardware.media.bufferpool@1.0",
         "android.hardware.media.omx@1.0",
         "android.hardware.media@1.0",
         "android.hidl.base@1.0",
diff --git a/media/c2/1.0/IComponent.hal b/media/c2/1.0/IComponent.hal
index 822b24e..7fd551f 100644
--- a/media/c2/1.0/IComponent.hal
+++ b/media/c2/1.0/IComponent.hal
@@ -16,7 +16,7 @@
 
 package android.hardware.media.c2@1.0;
 
-import android.hardware.graphics.bufferqueue@2.0::IGraphicBufferProducer;
+import android.hardware.graphics.bufferqueue@1.0::IGraphicBufferProducer;
 import android.hardware.media.omx@1.0::IGraphicBufferSource;
 
 import IConfigurable;
diff --git a/media/c2/1.0/IComponentStore.hal b/media/c2/1.0/IComponentStore.hal
index 6a57c38..a027afe 100644
--- a/media/c2/1.0/IComponentStore.hal
+++ b/media/c2/1.0/IComponentStore.hal
@@ -16,7 +16,7 @@
 
 package android.hardware.media.c2@1.0;
 
-import android.hardware.media.bufferpool@2.0::IClientManager;
+import android.hardware.media.bufferpool@1.0::IClientManager;
 import IComponentInterface;
 import IComponentListener;
 import IComponent;
diff --git a/media/c2/1.0/IInputSurface.hal b/media/c2/1.0/IInputSurface.hal
index 0a1b56d..25c6c8e 100644
--- a/media/c2/1.0/IInputSurface.hal
+++ b/media/c2/1.0/IInputSurface.hal
@@ -16,7 +16,7 @@
 
 package android.hardware.media.c2@1.0;
 
-import android.hardware.graphics.bufferqueue@2.0::IGraphicBufferProducer;
+import android.hardware.graphics.bufferqueue@1.0::IGraphicBufferProducer;
 
 import IConfigurable;
 
diff --git a/media/c2/1.0/types.hal b/media/c2/1.0/types.hal
index 7b75041..13269d2 100644
--- a/media/c2/1.0/types.hal
+++ b/media/c2/1.0/types.hal
@@ -16,7 +16,7 @@
 
 package android.hardware.media.c2@1.0;
 
-import android.hardware.media.bufferpool@2.0::BufferStatusMessage;
+import android.hardware.media.bufferpool@1.0::BufferStatusMessage;
 
 /**
  * Common return values for Codec2 operations.
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index ffba45c..234527a 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -39,17 +39,14 @@
     ],
 }
 
-cc_test {
-    name: "VtsHalNeuralnetworksV1_0TargetTest",
+cc_defaults {
+    name: "VtsHalNeuralNetworksTargetTestDefaults",
+    defaults: ["VtsHalTargetTestDefaults"],
     srcs: [
-        "BasicTests.cpp",
-        "GeneratedTests.cpp",
         "ValidateModel.cpp",
         "ValidateRequest.cpp",
-        "ValidationTests.cpp",
         "VtsHalNeuralnetworks.cpp",
     ],
-    defaults: ["VtsHalTargetTestDefaults"],
     static_libs: [
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
@@ -66,4 +63,22 @@
         "libneuralnetworks_generated_test_harness_headers",
         "libneuralnetworks_generated_tests",
     ],
+    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
+    // error in ld.gold.
+    arch: {
+        arm: {
+            sanitize: {
+                never: true,
+            },
+        },
+    },
+}
+
+cc_test {
+    name: "VtsHalNeuralnetworksV1_0TargetTest",
+    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    srcs: [
+        "BasicTests.cpp",
+        "GeneratedTests.cpp",
+    ],
 }
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 1f66c43..802d018 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -45,6 +45,7 @@
 using ::test_helper::Int32Operands;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
+using ::test_helper::MixedTypedIndex;
 using ::test_helper::Quant8Operands;
 using ::test_helper::resize_accordingly;
 
@@ -63,14 +64,16 @@
     copy_back_<int32_t>(dst, ra, src);
     copy_back_<uint8_t>(dst, ra, src);
     copy_back_<int16_t>(dst, ra, src);
-    static_assert(4 == std::tuple_size<MixedTyped>::value,
+    copy_back_<_Float16>(dst, ra, src);
+    static_assert(5 == std::tuple_size<MixedTyped>::value,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
 void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples, float fpAtol = 1e-5f,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                            float fpRtol = 1e-5f) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
@@ -78,13 +81,20 @@
     int example_no = 1;
     for (auto& example : examples) {
         SCOPED_TRACE(example_no++);
-
         const MixedTyped& inputs = example.operands.first;
         const MixedTyped& golden = example.operands.second;
 
+        const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
+        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
+            // TODO: Adjust the error limit based on testing.
+            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
+            fpAtol = 5.0f * 0.0009765625f;
+            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
+            fpRtol = 5.0f * 0.0009765625f;
+        }
+
         std::vector<RequestArgument> inputs_info, outputs_info;
         uint32_t inputSize = 0, outputSize = 0;
-
         // This function only partially specifies the metadata (vector of RequestArguments).
         // The contents are copied over below.
         for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
@@ -228,7 +238,8 @@
     ASSERT_NE(nullptr, preparedModel.get());
 
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
 }
 
 void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -272,13 +283,8 @@
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    // TODO: Adjust the error limit based on testing.
-    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
-    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
-                                                           : 5.0f * 0.0009765625f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16);
 }
 
 // TODO: Reduce code duplication.
@@ -323,13 +329,8 @@
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    // TODO: Adjust the error limit based on testing.
-    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
-    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
-                                                           : 5.0f * 0.0009765625f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16);
 }
 
 }  // namespace generated_tests
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index ac1ae60..26b4d8b 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -45,6 +45,8 @@
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
 // in frameworks/ml/nn/runtime/tests/generated/
 #include "all_generated_V1_0_vts_tests.cpp"
 
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h
deleted file mode 100644
index 268e671..0000000
--- a/neuralnetworks/1.0/vts/functional/Models.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
-#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "TestHarness.h"
-
-#include <android/hardware/neuralnetworks/1.0/types.h>
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
-
-using MixedTypedExample = test_helper::MixedTypedExample;
-
-#define FOR_EACH_TEST_MODEL(FN)                          \
-    FN(add_broadcast_quant8)                             \
-    FN(add)                                              \
-    FN(add_quant8)                                       \
-    FN(avg_pool_float_1)                                 \
-    FN(avg_pool_float_2)                                 \
-    FN(avg_pool_float_3)                                 \
-    FN(avg_pool_float_4)                                 \
-    FN(avg_pool_float_5)                                 \
-    FN(avg_pool_quant8_1)                                \
-    FN(avg_pool_quant8_2)                                \
-    FN(avg_pool_quant8_3)                                \
-    FN(avg_pool_quant8_4)                                \
-    FN(avg_pool_quant8_5)                                \
-    FN(concat_float_1)                                   \
-    FN(concat_float_2)                                   \
-    FN(concat_float_3)                                   \
-    FN(concat_quant8_1)                                  \
-    FN(concat_quant8_2)                                  \
-    FN(concat_quant8_3)                                  \
-    FN(conv_1_h3_w2_SAME)                                \
-    FN(conv_1_h3_w2_VALID)                               \
-    FN(conv_3_h3_w2_SAME)                                \
-    FN(conv_3_h3_w2_VALID)                               \
-    FN(conv_float_2)                                     \
-    FN(conv_float_channels)                              \
-    FN(conv_float_channels_weights_as_inputs)            \
-    FN(conv_float_large)                                 \
-    FN(conv_float_large_weights_as_inputs)               \
-    FN(conv_float)                                       \
-    FN(conv_float_weights_as_inputs)                     \
-    FN(conv_quant8_2)                                    \
-    FN(conv_quant8_channels)                             \
-    FN(conv_quant8_channels_weights_as_inputs)           \
-    FN(conv_quant8_large)                                \
-    FN(conv_quant8_large_weights_as_inputs)              \
-    FN(conv_quant8)                                      \
-    FN(conv_quant8_overflow)                             \
-    FN(conv_quant8_overflow_weights_as_inputs)           \
-    FN(conv_quant8_weights_as_inputs)                    \
-    FN(depth_to_space_float_1)                           \
-    FN(depth_to_space_float_2)                           \
-    FN(depth_to_space_float_3)                           \
-    FN(depth_to_space_quant8_1)                          \
-    FN(depth_to_space_quant8_2)                          \
-    FN(depthwise_conv2d_float_2)                         \
-    FN(depthwise_conv2d_float_large_2)                   \
-    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
-    FN(depthwise_conv2d_float_large)                     \
-    FN(depthwise_conv2d_float_large_weights_as_inputs)   \
-    FN(depthwise_conv2d_float)                           \
-    FN(depthwise_conv2d_float_weights_as_inputs)         \
-    FN(depthwise_conv2d_quant8_2)                        \
-    FN(depthwise_conv2d_quant8_large)                    \
-    FN(depthwise_conv2d_quant8_large_weights_as_inputs)  \
-    FN(depthwise_conv2d_quant8)                          \
-    FN(depthwise_conv2d_quant8_weights_as_inputs)        \
-    FN(depthwise_conv)                                   \
-    FN(dequantize)                                       \
-    FN(embedding_lookup)                                 \
-    FN(floor)                                            \
-    FN(fully_connected_float_2)                          \
-    FN(fully_connected_float_large)                      \
-    FN(fully_connected_float_large_weights_as_inputs)    \
-    FN(fully_connected_float)                            \
-    FN(fully_connected_float_weights_as_inputs)          \
-    FN(fully_connected_quant8_2)                         \
-    FN(fully_connected_quant8_large)                     \
-    FN(fully_connected_quant8_large_weights_as_inputs)   \
-    FN(fully_connected_quant8)                           \
-    FN(fully_connected_quant8_weights_as_inputs)         \
-    FN(hashtable_lookup_float)                           \
-    FN(hashtable_lookup_quant8)                          \
-    FN(l2_normalization_2)                               \
-    FN(l2_normalization_large)                           \
-    FN(l2_normalization)                                 \
-    FN(l2_pool_float_2)                                  \
-    FN(l2_pool_float_large)                              \
-    FN(l2_pool_float)                                    \
-    FN(local_response_norm_float_1)                      \
-    FN(local_response_norm_float_2)                      \
-    FN(local_response_norm_float_3)                      \
-    FN(local_response_norm_float_4)                      \
-    FN(logistic_float_1)                                 \
-    FN(logistic_float_2)                                 \
-    FN(logistic_quant8_1)                                \
-    FN(logistic_quant8_2)                                \
-    FN(lsh_projection_2)                                 \
-    FN(lsh_projection)                                   \
-    FN(lsh_projection_weights_as_inputs)                 \
-    FN(lstm2)                                            \
-    FN(lstm2_state2)                                     \
-    FN(lstm2_state)                                      \
-    FN(lstm3)                                            \
-    FN(lstm3_state2)                                     \
-    FN(lstm3_state3)                                     \
-    FN(lstm3_state)                                      \
-    FN(lstm)                                             \
-    FN(lstm_state2)                                      \
-    FN(lstm_state)                                       \
-    FN(max_pool_float_1)                                 \
-    FN(max_pool_float_2)                                 \
-    FN(max_pool_float_3)                                 \
-    FN(max_pool_float_4)                                 \
-    FN(max_pool_quant8_1)                                \
-    FN(max_pool_quant8_2)                                \
-    FN(max_pool_quant8_3)                                \
-    FN(max_pool_quant8_4)                                \
-    FN(mobilenet_224_gender_basic_fixed)                 \
-    FN(mobilenet_quantized)                              \
-    FN(mul_broadcast_quant8)                             \
-    FN(mul)                                              \
-    FN(mul_quant8)                                       \
-    FN(mul_relu)                                         \
-    FN(relu1_float_1)                                    \
-    FN(relu1_float_2)                                    \
-    FN(relu1_quant8_1)                                   \
-    FN(relu1_quant8_2)                                   \
-    FN(relu6_float_1)                                    \
-    FN(relu6_float_2)                                    \
-    FN(relu6_quant8_1)                                   \
-    FN(relu6_quant8_2)                                   \
-    FN(relu_float_1)                                     \
-    FN(relu_float_2)                                     \
-    FN(relu_quant8_1)                                    \
-    FN(relu_quant8_2)                                    \
-    FN(reshape)                                          \
-    FN(reshape_quant8)                                   \
-    FN(reshape_quant8_weights_as_inputs)                 \
-    FN(reshape_weights_as_inputs)                        \
-    FN(resize_bilinear_2)                                \
-    FN(resize_bilinear)                                  \
-    FN(rnn)                                              \
-    FN(rnn_state)                                        \
-    FN(softmax_float_1)                                  \
-    FN(softmax_float_2)                                  \
-    FN(softmax_quant8_1)                                 \
-    FN(softmax_quant8_2)                                 \
-    FN(space_to_depth_float_1)                           \
-    FN(space_to_depth_float_2)                           \
-    FN(space_to_depth_float_3)                           \
-    FN(space_to_depth_quant8_1)                          \
-    FN(space_to_depth_quant8_2)                          \
-    FN(svdf2)                                            \
-    FN(svdf)                                             \
-    FN(svdf_state)                                       \
-    FN(tanh)
-
-#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
-    namespace function {                            \
-    extern std::vector<MixedTypedExample> examples; \
-    Model createTestModel();                        \
-    }
-
-FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
-
-#undef FORWARD_DECLARE_GENERATED_OBJECTS
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
-
-#endif  // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
diff --git a/neuralnetworks/1.0/vts/functional/ValidationTests.cpp b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
deleted file mode 100644
index d3cbcff..0000000
--- a/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "Models.h"
-#include "VtsHalNeuralnetworks.h"
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
-
-// forward declarations
-std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
-
-// generate validation tests
-#define VTS_CURRENT_TEST_CASE(TestName)                                           \
-    TEST_F(ValidationTest, TestName) {                                            \
-        const Model model = TestName::createTestModel();                          \
-        const std::vector<Request> requests = createRequests(TestName::examples); \
-        validateModel(model);                                                     \
-        validateRequests(model, requests);                                        \
-    }
-
-FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
-
-#undef VTS_CURRENT_TEST_CASE
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index a1c0f1f..07c9b6e 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -14,40 +14,21 @@
 // limitations under the License.
 //
 
+// Tests for V1_0 models using the V1_1 HAL.
+cc_test {
+    name: "VtsHalNeuralnetworksV1_1CompatV1_0TargetTest",
+    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    srcs: [
+        "GeneratedTestsV1_0.cpp",
+    ],
+}
+
+// Tests for V1_1 models.
 cc_test {
     name: "VtsHalNeuralnetworksV1_1TargetTest",
+    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
     srcs: [
         "BasicTests.cpp",
         "GeneratedTests.cpp",
-        "ValidateModel.cpp",
-        "ValidateRequest.cpp",
-        "ValidationTests.cpp",
-        "VtsHalNeuralnetworks.cpp",
     ],
-    defaults: ["VtsHalTargetTestDefaults"],
-    static_libs: [
-        "android.hardware.neuralnetworks@1.0",
-        "android.hardware.neuralnetworks@1.1",
-        "android.hardware.neuralnetworks@1.2",
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
-        "libgmock",
-        "libhidlmemory",
-        "libneuralnetworks_utils",
-        "VtsHalNeuralnetworksTest_utils",
-    ],
-    header_libs: [
-        "libneuralnetworks_headers",
-        "libneuralnetworks_generated_test_harness_headers",
-        "libneuralnetworks_generated_tests",
-    ],
-    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
-    // error in ld.gold.
-    arch: {
-        arm: {
-            sanitize: {
-                never: true,
-            },
-        },
-    },
 }
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 1f49904..290a9d3 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -45,8 +45,9 @@
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
 // in frameworks/ml/nn/runtime/tests/generated/
-#include "all_generated_V1_0_vts_tests.cpp"
 #include "all_generated_V1_1_vts_tests.cpp"
 
 }  // namespace functional
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
new file mode 100644
index 0000000..a36b24c
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
+                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+}  // namespace generated_tests
+
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_V1_0_vts_tests.cpp"
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_1
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h
deleted file mode 100644
index 62bc95e..0000000
--- a/neuralnetworks/1.1/vts/functional/Models.h
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
-#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "TestHarness.h"
-
-#include <android/hardware/neuralnetworks/1.0/types.h>
-#include <android/hardware/neuralnetworks/1.1/types.h>
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
-
-using MixedTypedExample = test_helper::MixedTypedExample;
-
-#define FOR_EACH_TEST_MODEL(FN)                                  \
-    FN(add)                                                      \
-    FN(add_broadcast_quant8)                                     \
-    FN(add_quant8)                                               \
-    FN(add_relaxed)                                              \
-    FN(avg_pool_float_1)                                         \
-    FN(avg_pool_float_1_relaxed)                                 \
-    FN(avg_pool_float_2)                                         \
-    FN(avg_pool_float_2_relaxed)                                 \
-    FN(avg_pool_float_3)                                         \
-    FN(avg_pool_float_3_relaxed)                                 \
-    FN(avg_pool_float_4)                                         \
-    FN(avg_pool_float_4_relaxed)                                 \
-    FN(avg_pool_float_5)                                         \
-    FN(avg_pool_float_5_relaxed)                                 \
-    FN(avg_pool_quant8_1)                                        \
-    FN(avg_pool_quant8_2)                                        \
-    FN(avg_pool_quant8_3)                                        \
-    FN(avg_pool_quant8_4)                                        \
-    FN(avg_pool_quant8_5)                                        \
-    FN(batch_to_space)                                           \
-    FN(batch_to_space_float_1)                                   \
-    FN(batch_to_space_float_1_relaxed)                           \
-    FN(batch_to_space_quant8_1)                                  \
-    FN(batch_to_space_relaxed)                                   \
-    FN(concat_float_1)                                           \
-    FN(concat_float_1_relaxed)                                   \
-    FN(concat_float_2)                                           \
-    FN(concat_float_2_relaxed)                                   \
-    FN(concat_float_3)                                           \
-    FN(concat_float_3_relaxed)                                   \
-    FN(concat_quant8_1)                                          \
-    FN(concat_quant8_2)                                          \
-    FN(concat_quant8_3)                                          \
-    FN(conv_1_h3_w2_SAME)                                        \
-    FN(conv_1_h3_w2_SAME_relaxed)                                \
-    FN(conv_1_h3_w2_VALID)                                       \
-    FN(conv_1_h3_w2_VALID_relaxed)                               \
-    FN(conv_3_h3_w2_SAME)                                        \
-    FN(conv_3_h3_w2_SAME_relaxed)                                \
-    FN(conv_3_h3_w2_VALID)                                       \
-    FN(conv_3_h3_w2_VALID_relaxed)                               \
-    FN(conv_float)                                               \
-    FN(conv_float_2)                                             \
-    FN(conv_float_2_relaxed)                                     \
-    FN(conv_float_channels)                                      \
-    FN(conv_float_channels_relaxed)                              \
-    FN(conv_float_channels_weights_as_inputs)                    \
-    FN(conv_float_channels_weights_as_inputs_relaxed)            \
-    FN(conv_float_large)                                         \
-    FN(conv_float_large_relaxed)                                 \
-    FN(conv_float_large_weights_as_inputs)                       \
-    FN(conv_float_large_weights_as_inputs_relaxed)               \
-    FN(conv_float_relaxed)                                       \
-    FN(conv_float_weights_as_inputs)                             \
-    FN(conv_float_weights_as_inputs_relaxed)                     \
-    FN(conv_quant8)                                              \
-    FN(conv_quant8_2)                                            \
-    FN(conv_quant8_channels)                                     \
-    FN(conv_quant8_channels_weights_as_inputs)                   \
-    FN(conv_quant8_large)                                        \
-    FN(conv_quant8_large_weights_as_inputs)                      \
-    FN(conv_quant8_overflow)                                     \
-    FN(conv_quant8_overflow_weights_as_inputs)                   \
-    FN(conv_quant8_weights_as_inputs)                            \
-    FN(depth_to_space_float_1)                                   \
-    FN(depth_to_space_float_1_relaxed)                           \
-    FN(depth_to_space_float_2)                                   \
-    FN(depth_to_space_float_2_relaxed)                           \
-    FN(depth_to_space_float_3)                                   \
-    FN(depth_to_space_float_3_relaxed)                           \
-    FN(depth_to_space_quant8_1)                                  \
-    FN(depth_to_space_quant8_2)                                  \
-    FN(depthwise_conv)                                           \
-    FN(depthwise_conv2d_float)                                   \
-    FN(depthwise_conv2d_float_2)                                 \
-    FN(depthwise_conv2d_float_2_relaxed)                         \
-    FN(depthwise_conv2d_float_large)                             \
-    FN(depthwise_conv2d_float_large_2)                           \
-    FN(depthwise_conv2d_float_large_2_relaxed)                   \
-    FN(depthwise_conv2d_float_large_2_weights_as_inputs)         \
-    FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
-    FN(depthwise_conv2d_float_large_relaxed)                     \
-    FN(depthwise_conv2d_float_large_weights_as_inputs)           \
-    FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed)   \
-    FN(depthwise_conv2d_float_relaxed)                           \
-    FN(depthwise_conv2d_float_weights_as_inputs)                 \
-    FN(depthwise_conv2d_float_weights_as_inputs_relaxed)         \
-    FN(depthwise_conv2d_quant8)                                  \
-    FN(depthwise_conv2d_quant8_2)                                \
-    FN(depthwise_conv2d_quant8_large)                            \
-    FN(depthwise_conv2d_quant8_large_weights_as_inputs)          \
-    FN(depthwise_conv2d_quant8_weights_as_inputs)                \
-    FN(depthwise_conv_relaxed)                                   \
-    FN(dequantize)                                               \
-    FN(dequantize_relaxed)                                       \
-    FN(div)                                                      \
-    FN(div_broadcast_float)                                      \
-    FN(div_broadcast_float_relaxed)                              \
-    FN(div_relaxed)                                              \
-    FN(embedding_lookup)                                         \
-    FN(embedding_lookup_relaxed)                                 \
-    FN(floor)                                                    \
-    FN(floor_relaxed)                                            \
-    FN(fully_connected_float)                                    \
-    FN(fully_connected_float_2)                                  \
-    FN(fully_connected_float_2_relaxed)                          \
-    FN(fully_connected_float_4d_simple)                          \
-    FN(fully_connected_float_4d_simple_relaxed)                  \
-    FN(fully_connected_float_large)                              \
-    FN(fully_connected_float_large_relaxed)                      \
-    FN(fully_connected_float_large_weights_as_inputs)            \
-    FN(fully_connected_float_large_weights_as_inputs_relaxed)    \
-    FN(fully_connected_float_relaxed)                            \
-    FN(fully_connected_float_weights_as_inputs)                  \
-    FN(fully_connected_float_weights_as_inputs_relaxed)          \
-    FN(fully_connected_quant8)                                   \
-    FN(fully_connected_quant8_2)                                 \
-    FN(fully_connected_quant8_large)                             \
-    FN(fully_connected_quant8_large_weights_as_inputs)           \
-    FN(fully_connected_quant8_weights_as_inputs)                 \
-    FN(hashtable_lookup_float)                                   \
-    FN(hashtable_lookup_float_relaxed)                           \
-    FN(hashtable_lookup_quant8)                                  \
-    FN(l2_normalization)                                         \
-    FN(l2_normalization_2)                                       \
-    FN(l2_normalization_2_relaxed)                               \
-    FN(l2_normalization_large)                                   \
-    FN(l2_normalization_large_relaxed)                           \
-    FN(l2_normalization_relaxed)                                 \
-    FN(l2_pool_float)                                            \
-    FN(l2_pool_float_2)                                          \
-    FN(l2_pool_float_2_relaxed)                                  \
-    FN(l2_pool_float_large)                                      \
-    FN(l2_pool_float_large_relaxed)                              \
-    FN(l2_pool_float_relaxed)                                    \
-    FN(local_response_norm_float_1)                              \
-    FN(local_response_norm_float_1_relaxed)                      \
-    FN(local_response_norm_float_2)                              \
-    FN(local_response_norm_float_2_relaxed)                      \
-    FN(local_response_norm_float_3)                              \
-    FN(local_response_norm_float_3_relaxed)                      \
-    FN(local_response_norm_float_4)                              \
-    FN(local_response_norm_float_4_relaxed)                      \
-    FN(logistic_float_1)                                         \
-    FN(logistic_float_1_relaxed)                                 \
-    FN(logistic_float_2)                                         \
-    FN(logistic_float_2_relaxed)                                 \
-    FN(logistic_quant8_1)                                        \
-    FN(logistic_quant8_2)                                        \
-    FN(lsh_projection)                                           \
-    FN(lsh_projection_2)                                         \
-    FN(lsh_projection_2_relaxed)                                 \
-    FN(lsh_projection_relaxed)                                   \
-    FN(lsh_projection_weights_as_inputs)                         \
-    FN(lsh_projection_weights_as_inputs_relaxed)                 \
-    FN(lstm)                                                     \
-    FN(lstm2)                                                    \
-    FN(lstm2_relaxed)                                            \
-    FN(lstm2_state)                                              \
-    FN(lstm2_state2)                                             \
-    FN(lstm2_state2_relaxed)                                     \
-    FN(lstm2_state_relaxed)                                      \
-    FN(lstm3)                                                    \
-    FN(lstm3_relaxed)                                            \
-    FN(lstm3_state)                                              \
-    FN(lstm3_state2)                                             \
-    FN(lstm3_state2_relaxed)                                     \
-    FN(lstm3_state3)                                             \
-    FN(lstm3_state3_relaxed)                                     \
-    FN(lstm3_state_relaxed)                                      \
-    FN(lstm_relaxed)                                             \
-    FN(lstm_state)                                               \
-    FN(lstm_state2)                                              \
-    FN(lstm_state2_relaxed)                                      \
-    FN(lstm_state_relaxed)                                       \
-    FN(max_pool_float_1)                                         \
-    FN(max_pool_float_1_relaxed)                                 \
-    FN(max_pool_float_2)                                         \
-    FN(max_pool_float_2_relaxed)                                 \
-    FN(max_pool_float_3)                                         \
-    FN(max_pool_float_3_relaxed)                                 \
-    FN(max_pool_float_4)                                         \
-    FN(max_pool_float_4_relaxed)                                 \
-    FN(max_pool_quant8_1)                                        \
-    FN(max_pool_quant8_2)                                        \
-    FN(max_pool_quant8_3)                                        \
-    FN(max_pool_quant8_4)                                        \
-    FN(mean)                                                     \
-    FN(mean_float_1)                                             \
-    FN(mean_float_1_relaxed)                                     \
-    FN(mean_float_2)                                             \
-    FN(mean_float_2_relaxed)                                     \
-    FN(mean_quant8_1)                                            \
-    FN(mean_quant8_2)                                            \
-    FN(mean_relaxed)                                             \
-    FN(mobilenet_224_gender_basic_fixed)                         \
-    FN(mobilenet_224_gender_basic_fixed_relaxed)                 \
-    FN(mobilenet_quantized)                                      \
-    FN(mul)                                                      \
-    FN(mul_broadcast_quant8)                                     \
-    FN(mul_quant8)                                               \
-    FN(mul_relaxed)                                              \
-    FN(mul_relu)                                                 \
-    FN(mul_relu_relaxed)                                         \
-    FN(pad)                                                      \
-    FN(pad_float_1)                                              \
-    FN(pad_float_1_relaxed)                                      \
-    FN(pad_relaxed)                                              \
-    FN(relu1_float_1)                                            \
-    FN(relu1_float_1_relaxed)                                    \
-    FN(relu1_float_2)                                            \
-    FN(relu1_float_2_relaxed)                                    \
-    FN(relu1_quant8_1)                                           \
-    FN(relu1_quant8_2)                                           \
-    FN(relu6_float_1)                                            \
-    FN(relu6_float_1_relaxed)                                    \
-    FN(relu6_float_2)                                            \
-    FN(relu6_float_2_relaxed)                                    \
-    FN(relu6_quant8_1)                                           \
-    FN(relu6_quant8_2)                                           \
-    FN(relu_float_1)                                             \
-    FN(relu_float_1_relaxed)                                     \
-    FN(relu_float_2)                                             \
-    FN(relu_float_2_relaxed)                                     \
-    FN(relu_quant8_1)                                            \
-    FN(relu_quant8_2)                                            \
-    FN(reshape)                                                  \
-    FN(reshape_quant8)                                           \
-    FN(reshape_quant8_weights_as_inputs)                         \
-    FN(reshape_relaxed)                                          \
-    FN(reshape_weights_as_inputs)                                \
-    FN(reshape_weights_as_inputs_relaxed)                        \
-    FN(resize_bilinear)                                          \
-    FN(resize_bilinear_2)                                        \
-    FN(resize_bilinear_2_relaxed)                                \
-    FN(resize_bilinear_relaxed)                                  \
-    FN(rnn)                                                      \
-    FN(rnn_relaxed)                                              \
-    FN(rnn_state)                                                \
-    FN(rnn_state_relaxed)                                        \
-    FN(softmax_float_1)                                          \
-    FN(softmax_float_1_relaxed)                                  \
-    FN(softmax_float_2)                                          \
-    FN(softmax_float_2_relaxed)                                  \
-    FN(softmax_quant8_1)                                         \
-    FN(softmax_quant8_2)                                         \
-    FN(space_to_batch)                                           \
-    FN(space_to_batch_float_1)                                   \
-    FN(space_to_batch_float_1_relaxed)                           \
-    FN(space_to_batch_float_2)                                   \
-    FN(space_to_batch_float_2_relaxed)                           \
-    FN(space_to_batch_float_3)                                   \
-    FN(space_to_batch_float_3_relaxed)                           \
-    FN(space_to_batch_quant8_1)                                  \
-    FN(space_to_batch_quant8_2)                                  \
-    FN(space_to_batch_quant8_3)                                  \
-    FN(space_to_batch_relaxed)                                   \
-    FN(space_to_depth_float_1)                                   \
-    FN(space_to_depth_float_1_relaxed)                           \
-    FN(space_to_depth_float_2)                                   \
-    FN(space_to_depth_float_2_relaxed)                           \
-    FN(space_to_depth_float_3)                                   \
-    FN(space_to_depth_float_3_relaxed)                           \
-    FN(space_to_depth_quant8_1)                                  \
-    FN(space_to_depth_quant8_2)                                  \
-    FN(squeeze)                                                  \
-    FN(squeeze_float_1)                                          \
-    FN(squeeze_float_1_relaxed)                                  \
-    FN(squeeze_quant8_1)                                         \
-    FN(squeeze_relaxed)                                          \
-    FN(strided_slice)                                            \
-    FN(strided_slice_float_1)                                    \
-    FN(strided_slice_float_10)                                   \
-    FN(strided_slice_float_10_relaxed)                           \
-    FN(strided_slice_float_11)                                   \
-    FN(strided_slice_float_11_relaxed)                           \
-    FN(strided_slice_float_1_relaxed)                            \
-    FN(strided_slice_float_2)                                    \
-    FN(strided_slice_float_2_relaxed)                            \
-    FN(strided_slice_float_3)                                    \
-    FN(strided_slice_float_3_relaxed)                            \
-    FN(strided_slice_float_4)                                    \
-    FN(strided_slice_float_4_relaxed)                            \
-    FN(strided_slice_float_5)                                    \
-    FN(strided_slice_float_5_relaxed)                            \
-    FN(strided_slice_float_6)                                    \
-    FN(strided_slice_float_6_relaxed)                            \
-    FN(strided_slice_float_7)                                    \
-    FN(strided_slice_float_7_relaxed)                            \
-    FN(strided_slice_float_8)                                    \
-    FN(strided_slice_float_8_relaxed)                            \
-    FN(strided_slice_float_9)                                    \
-    FN(strided_slice_float_9_relaxed)                            \
-    FN(strided_slice_qaunt8_10)                                  \
-    FN(strided_slice_qaunt8_11)                                  \
-    FN(strided_slice_quant8_1)                                   \
-    FN(strided_slice_quant8_2)                                   \
-    FN(strided_slice_quant8_3)                                   \
-    FN(strided_slice_quant8_4)                                   \
-    FN(strided_slice_quant8_5)                                   \
-    FN(strided_slice_quant8_6)                                   \
-    FN(strided_slice_quant8_7)                                   \
-    FN(strided_slice_quant8_8)                                   \
-    FN(strided_slice_quant8_9)                                   \
-    FN(strided_slice_relaxed)                                    \
-    FN(sub)                                                      \
-    FN(sub_broadcast_float)                                      \
-    FN(sub_broadcast_float_relaxed)                              \
-    FN(sub_relaxed)                                              \
-    FN(svdf)                                                     \
-    FN(svdf2)                                                    \
-    FN(svdf2_relaxed)                                            \
-    FN(svdf_relaxed)                                             \
-    FN(svdf_state)                                               \
-    FN(svdf_state_relaxed)                                       \
-    FN(tanh)                                                     \
-    FN(tanh_relaxed)                                             \
-    FN(transpose)                                                \
-    FN(transpose_float_1)                                        \
-    FN(transpose_float_1_relaxed)                                \
-    FN(transpose_quant8_1)                                       \
-    FN(transpose_relaxed)
-
-#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
-    namespace function {                            \
-    extern std::vector<MixedTypedExample> examples; \
-    Model createTestModel();                        \
-    }
-
-FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
-
-#undef FORWARD_DECLARE_GENERATED_OBJECTS
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
-
-#endif  // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
diff --git a/neuralnetworks/1.1/vts/functional/ValidationTests.cpp b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
deleted file mode 100644
index 1c35ba8..0000000
--- a/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "Models.h"
-#include "VtsHalNeuralnetworks.h"
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
-
-// forward declarations
-std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
-
-// generate validation tests
-#define VTS_CURRENT_TEST_CASE(TestName)                                           \
-    TEST_F(ValidationTest, TestName) {                                            \
-        const Model model = TestName::createTestModel();                          \
-        const std::vector<Request> requests = createRequests(TestName::examples); \
-        validateModel(model);                                                     \
-        validateRequests(model, requests);                                        \
-    }
-
-FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
-
-#undef VTS_CURRENT_TEST_CASE
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 4a1e7a8..22d0002 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -47,6 +47,18 @@
 };
 
 /**
+ * The range of values in the OperandType enum.
+ *
+ * THE MAX VALUES MUST BE UPDATED WHEN ADDING NEW TYPES to the OperandType enum.
+ */
+enum OperandTypeRange : uint32_t {
+    OPERAND_FUNDAMENTAL_MIN = 0,
+    OPERAND_FUNDAMENTAL_MAX = 8,
+    OPERAND_OEM_MIN     = 10000,
+    OPERAND_OEM_MAX     = 10001,
+};
+
+/**
  * Operation types.
  *
  * The type of an operation in a model.
@@ -106,6 +118,18 @@
 };
 
 /**
+ * The range of values in the OperationType enum.
+ *
+ * THE MAX VALUES MUST BE UPDATED WHEN ADDING NEW TYPES to the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+    OPERATION_FUNDAMENTAL_MIN = 0,
+    OPERATION_FUNDAMENTAL_MAX = 87,
+    OPERATION_OEM_MIN = 10000,
+    OPERATION_OEM_MAX = 10000,
+};
+
+/**
  * Describes one operation of the model's graph.
  */
 struct Operation {
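Editor's note on the new range enums above: OperandTypeRange and OperationTypeRange let validation tests derive out-of-range values from the declared bounds instead of hard-coding neighbouring enum members, which is exactly what the ValidateModel.cpp hunk further down switches to. A minimal sketch of the idea, assuming the generated V1_2 types header exposes the enum as declared here; the helper name is hypothetical:

#include <android/hardware/neuralnetworks/1.2/types.h>

#include <cstdint>
#include <vector>

using ::android::hardware::neuralnetworks::V1_2::OperandTypeRange;

// Hypothetical helper: values just outside the declared operand-type ranges,
// used to probe that a driver rejects unknown operand types.
static std::vector<uint32_t> invalidOperandTypeValues() {
    return {
        static_cast<uint32_t>(OperandTypeRange::OPERAND_FUNDAMENTAL_MIN) - 1,  // wraps to 0xFFFFFFFF
        static_cast<uint32_t>(OperandTypeRange::OPERAND_FUNDAMENTAL_MAX) + 1,
        static_cast<uint32_t>(OperandTypeRange::OPERAND_OEM_MIN) - 1,
        static_cast<uint32_t>(OperandTypeRange::OPERAND_OEM_MAX) + 1,
    };
}

When a new type is added to OperandType, only OPERAND_FUNDAMENTAL_MAX has to move; the probed values track it automatically.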
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 09d0dc3..085d5db 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -14,40 +14,30 @@
 // limitations under the License.
 //
 
+// Tests for V1_0 models using the V1_2 HAL.
+cc_test {
+    name: "VtsHalNeuralnetworksV1_2CompatV1_0TargetTest",
+    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    srcs: [
+        "GeneratedTestsV1_0.cpp",
+    ],
+}
+
+// Tests for V1_1 models using the V1_2 HAL.
+cc_test {
+    name: "VtsHalNeuralnetworksV1_2CompatV1_1TargetTest",
+    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    srcs: [
+        "GeneratedTestsV1_1.cpp",
+    ],
+}
+
+// Tests for V1_2 models.
 cc_test {
     name: "VtsHalNeuralnetworksV1_2TargetTest",
+    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
     srcs: [
         "BasicTests.cpp",
         "GeneratedTests.cpp",
-        "ValidateModel.cpp",
-        "ValidateRequest.cpp",
-        "ValidationTests.cpp",
-        "VtsHalNeuralnetworks.cpp",
     ],
-    defaults: ["VtsHalTargetTestDefaults"],
-    static_libs: [
-        "android.hardware.neuralnetworks@1.0",
-        "android.hardware.neuralnetworks@1.1",
-        "android.hardware.neuralnetworks@1.2",
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
-        "libgmock",
-        "libhidlmemory",
-        "libneuralnetworks_utils",
-        "VtsHalNeuralnetworksTest_utils",
-    ],
-    header_libs: [
-        "libneuralnetworks_headers",
-        "libneuralnetworks_generated_test_harness_headers",
-        "libneuralnetworks_generated_tests",
-    ],
-    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
-    // error in ld.gold.
-    arch: {
-        arm: {
-            sanitize: {
-                never: true,
-            },
-        },
-    },
 }
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index e87fa6b..79d5a60 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -45,9 +45,9 @@
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
 // in frameworks/ml/nn/runtime/tests/generated/
-#include "all_generated_V1_0_vts_tests.cpp"
-#include "all_generated_V1_1_vts_tests.cpp"
 #include "all_generated_V1_2_vts_tests.cpp"
 
 }  // namespace functional
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
new file mode 100644
index 0000000..42e22b0
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
+                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+}  // namespace generated_tests
+
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_V1_0_vts_tests.cpp"
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_2
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
new file mode 100644
index 0000000..aab5cb6
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
+                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+}  // namespace generated_tests
+
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_V1_1_vts_tests.cpp"
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_2
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/Models.h b/neuralnetworks/1.2/vts/functional/Models.h
deleted file mode 100644
index 2d512fe..0000000
--- a/neuralnetworks/1.2/vts/functional/Models.h
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H
-#define VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "TestHarness.h"
-
-#include <android/hardware/neuralnetworks/1.0/types.h>
-#include <android/hardware/neuralnetworks/1.1/types.h>
-#include <android/hardware/neuralnetworks/1.2/types.h>
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
-
-using MixedTypedExample = test_helper::MixedTypedExample;
-
-#define FOR_EACH_TEST_MODEL(FN)                                  \
-    FN(add)                                                      \
-    FN(add_broadcast_quant8)                                     \
-    FN(add_quant8)                                               \
-    FN(add_relaxed)                                              \
-    FN(avg_pool_float_1)                                         \
-    FN(avg_pool_float_1_relaxed)                                 \
-    FN(avg_pool_float_2)                                         \
-    FN(avg_pool_float_2_relaxed)                                 \
-    FN(avg_pool_float_3)                                         \
-    FN(avg_pool_float_3_relaxed)                                 \
-    FN(avg_pool_float_4)                                         \
-    FN(avg_pool_float_4_relaxed)                                 \
-    FN(avg_pool_float_5)                                         \
-    FN(avg_pool_float_5_relaxed)                                 \
-    FN(avg_pool_quant8_1)                                        \
-    FN(avg_pool_quant8_2)                                        \
-    FN(avg_pool_quant8_3)                                        \
-    FN(avg_pool_quant8_4)                                        \
-    FN(avg_pool_quant8_5)                                        \
-    FN(batch_to_space)                                           \
-    FN(batch_to_space_float_1)                                   \
-    FN(batch_to_space_float_1_relaxed)                           \
-    FN(batch_to_space_quant8_1)                                  \
-    FN(batch_to_space_relaxed)                                   \
-    FN(concat_float_1)                                           \
-    FN(concat_float_1_relaxed)                                   \
-    FN(concat_float_2)                                           \
-    FN(concat_float_2_relaxed)                                   \
-    FN(concat_float_3)                                           \
-    FN(concat_float_3_relaxed)                                   \
-    FN(concat_quant8_1)                                          \
-    FN(concat_quant8_2)                                          \
-    FN(concat_quant8_3)                                          \
-    FN(conv_1_h3_w2_SAME)                                        \
-    FN(conv_1_h3_w2_SAME_relaxed)                                \
-    FN(conv_1_h3_w2_VALID)                                       \
-    FN(conv_1_h3_w2_VALID_relaxed)                               \
-    FN(conv_3_h3_w2_SAME)                                        \
-    FN(conv_3_h3_w2_SAME_relaxed)                                \
-    FN(conv_3_h3_w2_VALID)                                       \
-    FN(conv_3_h3_w2_VALID_relaxed)                               \
-    FN(conv_float)                                               \
-    FN(conv_float_2)                                             \
-    FN(conv_float_2_relaxed)                                     \
-    FN(conv_float_channels)                                      \
-    FN(conv_float_channels_relaxed)                              \
-    FN(conv_float_channels_weights_as_inputs)                    \
-    FN(conv_float_channels_weights_as_inputs_relaxed)            \
-    FN(conv_float_large)                                         \
-    FN(conv_float_large_relaxed)                                 \
-    FN(conv_float_large_weights_as_inputs)                       \
-    FN(conv_float_large_weights_as_inputs_relaxed)               \
-    FN(conv_float_relaxed)                                       \
-    FN(conv_float_weights_as_inputs)                             \
-    FN(conv_float_weights_as_inputs_relaxed)                     \
-    FN(conv_quant8)                                              \
-    FN(conv_quant8_2)                                            \
-    FN(conv_quant8_channels)                                     \
-    FN(conv_quant8_channels_weights_as_inputs)                   \
-    FN(conv_quant8_large)                                        \
-    FN(conv_quant8_large_weights_as_inputs)                      \
-    FN(conv_quant8_overflow)                                     \
-    FN(conv_quant8_overflow_weights_as_inputs)                   \
-    FN(conv_quant8_weights_as_inputs)                            \
-    FN(depth_to_space_float_1)                                   \
-    FN(depth_to_space_float_1_relaxed)                           \
-    FN(depth_to_space_float_2)                                   \
-    FN(depth_to_space_float_2_relaxed)                           \
-    FN(depth_to_space_float_3)                                   \
-    FN(depth_to_space_float_3_relaxed)                           \
-    FN(depth_to_space_quant8_1)                                  \
-    FN(depth_to_space_quant8_2)                                  \
-    FN(depthwise_conv)                                           \
-    FN(depthwise_conv2d_float)                                   \
-    FN(depthwise_conv2d_float_2)                                 \
-    FN(depthwise_conv2d_float_2_relaxed)                         \
-    FN(depthwise_conv2d_float_large)                             \
-    FN(depthwise_conv2d_float_large_2)                           \
-    FN(depthwise_conv2d_float_large_2_relaxed)                   \
-    FN(depthwise_conv2d_float_large_2_weights_as_inputs)         \
-    FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
-    FN(depthwise_conv2d_float_large_relaxed)                     \
-    FN(depthwise_conv2d_float_large_weights_as_inputs)           \
-    FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed)   \
-    FN(depthwise_conv2d_float_relaxed)                           \
-    FN(depthwise_conv2d_float_weights_as_inputs)                 \
-    FN(depthwise_conv2d_float_weights_as_inputs_relaxed)         \
-    FN(depthwise_conv2d_quant8)                                  \
-    FN(depthwise_conv2d_quant8_2)                                \
-    FN(depthwise_conv2d_quant8_large)                            \
-    FN(depthwise_conv2d_quant8_large_weights_as_inputs)          \
-    FN(depthwise_conv2d_quant8_weights_as_inputs)                \
-    FN(depthwise_conv_relaxed)                                   \
-    FN(dequantize)                                               \
-    FN(dequantize_relaxed)                                       \
-    FN(div)                                                      \
-    FN(div_broadcast_float)                                      \
-    FN(div_broadcast_float_relaxed)                              \
-    FN(div_relaxed)                                              \
-    FN(embedding_lookup)                                         \
-    FN(embedding_lookup_relaxed)                                 \
-    FN(floor)                                                    \
-    FN(floor_relaxed)                                            \
-    FN(fully_connected_float)                                    \
-    FN(fully_connected_float_2)                                  \
-    FN(fully_connected_float_2_relaxed)                          \
-    FN(fully_connected_float_4d_simple)                          \
-    FN(fully_connected_float_4d_simple_relaxed)                  \
-    FN(fully_connected_float_large)                              \
-    FN(fully_connected_float_large_relaxed)                      \
-    FN(fully_connected_float_large_weights_as_inputs)            \
-    FN(fully_connected_float_large_weights_as_inputs_relaxed)    \
-    FN(fully_connected_float_relaxed)                            \
-    FN(fully_connected_float_weights_as_inputs)                  \
-    FN(fully_connected_float_weights_as_inputs_relaxed)          \
-    FN(fully_connected_quant8)                                   \
-    FN(fully_connected_quant8_2)                                 \
-    FN(fully_connected_quant8_large)                             \
-    FN(fully_connected_quant8_large_weights_as_inputs)           \
-    FN(fully_connected_quant8_weights_as_inputs)                 \
-    FN(hashtable_lookup_float)                                   \
-    FN(hashtable_lookup_float_relaxed)                           \
-    FN(hashtable_lookup_quant8)                                  \
-    FN(l2_normalization)                                         \
-    FN(l2_normalization_2)                                       \
-    FN(l2_normalization_2_relaxed)                               \
-    FN(l2_normalization_large)                                   \
-    FN(l2_normalization_large_relaxed)                           \
-    FN(l2_normalization_relaxed)                                 \
-    FN(l2_pool_float)                                            \
-    FN(l2_pool_float_2)                                          \
-    FN(l2_pool_float_2_relaxed)                                  \
-    FN(l2_pool_float_large)                                      \
-    FN(l2_pool_float_large_relaxed)                              \
-    FN(l2_pool_float_relaxed)                                    \
-    FN(local_response_norm_float_1)                              \
-    FN(local_response_norm_float_1_relaxed)                      \
-    FN(local_response_norm_float_2)                              \
-    FN(local_response_norm_float_2_relaxed)                      \
-    FN(local_response_norm_float_3)                              \
-    FN(local_response_norm_float_3_relaxed)                      \
-    FN(local_response_norm_float_4)                              \
-    FN(local_response_norm_float_4_relaxed)                      \
-    FN(logistic_float_1)                                         \
-    FN(logistic_float_1_relaxed)                                 \
-    FN(logistic_float_2)                                         \
-    FN(logistic_float_2_relaxed)                                 \
-    FN(logistic_quant8_1)                                        \
-    FN(logistic_quant8_2)                                        \
-    FN(lsh_projection)                                           \
-    FN(lsh_projection_2)                                         \
-    FN(lsh_projection_2_relaxed)                                 \
-    FN(lsh_projection_relaxed)                                   \
-    FN(lsh_projection_weights_as_inputs)                         \
-    FN(lsh_projection_weights_as_inputs_relaxed)                 \
-    FN(lstm)                                                     \
-    FN(lstm2)                                                    \
-    FN(lstm2_relaxed)                                            \
-    FN(lstm2_state)                                              \
-    FN(lstm2_state2)                                             \
-    FN(lstm2_state2_relaxed)                                     \
-    FN(lstm2_state_relaxed)                                      \
-    FN(lstm3)                                                    \
-    FN(lstm3_relaxed)                                            \
-    FN(lstm3_state)                                              \
-    FN(lstm3_state2)                                             \
-    FN(lstm3_state2_relaxed)                                     \
-    FN(lstm3_state3)                                             \
-    FN(lstm3_state3_relaxed)                                     \
-    FN(lstm3_state_relaxed)                                      \
-    FN(lstm_relaxed)                                             \
-    FN(lstm_state)                                               \
-    FN(lstm_state2)                                              \
-    FN(lstm_state2_relaxed)                                      \
-    FN(lstm_state_relaxed)                                       \
-    FN(max_pool_float_1)                                         \
-    FN(max_pool_float_1_relaxed)                                 \
-    FN(max_pool_float_2)                                         \
-    FN(max_pool_float_2_relaxed)                                 \
-    FN(max_pool_float_3)                                         \
-    FN(max_pool_float_3_relaxed)                                 \
-    FN(max_pool_float_4)                                         \
-    FN(max_pool_float_4_relaxed)                                 \
-    FN(max_pool_quant8_1)                                        \
-    FN(max_pool_quant8_2)                                        \
-    FN(max_pool_quant8_3)                                        \
-    FN(max_pool_quant8_4)                                        \
-    FN(mean)                                                     \
-    FN(mean_float_1)                                             \
-    FN(mean_float_1_relaxed)                                     \
-    FN(mean_float_2)                                             \
-    FN(mean_float_2_relaxed)                                     \
-    FN(mean_quant8_1)                                            \
-    FN(mean_quant8_2)                                            \
-    FN(mean_relaxed)                                             \
-    FN(mobilenet_224_gender_basic_fixed)                         \
-    FN(mobilenet_224_gender_basic_fixed_relaxed)                 \
-    FN(mobilenet_quantized)                                      \
-    FN(mul)                                                      \
-    FN(mul_broadcast_quant8)                                     \
-    FN(mul_quant8)                                               \
-    FN(mul_relaxed)                                              \
-    FN(mul_relu)                                                 \
-    FN(mul_relu_relaxed)                                         \
-    FN(pad)                                                      \
-    FN(pad_float_1)                                              \
-    FN(pad_float_1_relaxed)                                      \
-    FN(pad_relaxed)                                              \
-    FN(random_multinomial)                                       \
-    FN(relu1_float_1)                                            \
-    FN(relu1_float_1_relaxed)                                    \
-    FN(relu1_float_2)                                            \
-    FN(relu1_float_2_relaxed)                                    \
-    FN(relu1_quant8_1)                                           \
-    FN(relu1_quant8_2)                                           \
-    FN(relu6_float_1)                                            \
-    FN(relu6_float_1_relaxed)                                    \
-    FN(relu6_float_2)                                            \
-    FN(relu6_float_2_relaxed)                                    \
-    FN(relu6_quant8_1)                                           \
-    FN(relu6_quant8_2)                                           \
-    FN(relu_float_1)                                             \
-    FN(relu_float_1_relaxed)                                     \
-    FN(relu_float_2)                                             \
-    FN(relu_float_2_relaxed)                                     \
-    FN(relu_quant8_1)                                            \
-    FN(relu_quant8_2)                                            \
-    FN(reshape)                                                  \
-    FN(reshape_quant8)                                           \
-    FN(reshape_quant8_weights_as_inputs)                         \
-    FN(reshape_relaxed)                                          \
-    FN(reshape_weights_as_inputs)                                \
-    FN(reshape_weights_as_inputs_relaxed)                        \
-    FN(resize_bilinear)                                          \
-    FN(resize_bilinear_2)                                        \
-    FN(resize_bilinear_2_relaxed)                                \
-    FN(resize_bilinear_relaxed)                                  \
-    FN(rnn)                                                      \
-    FN(rnn_relaxed)                                              \
-    FN(rnn_state)                                                \
-    FN(rnn_state_relaxed)                                        \
-    FN(softmax_float_1)                                          \
-    FN(softmax_float_1_relaxed)                                  \
-    FN(softmax_float_2)                                          \
-    FN(softmax_float_2_relaxed)                                  \
-    FN(softmax_quant8_1)                                         \
-    FN(softmax_quant8_2)                                         \
-    FN(space_to_batch)                                           \
-    FN(space_to_batch_float_1)                                   \
-    FN(space_to_batch_float_1_relaxed)                           \
-    FN(space_to_batch_float_2)                                   \
-    FN(space_to_batch_float_2_relaxed)                           \
-    FN(space_to_batch_float_3)                                   \
-    FN(space_to_batch_float_3_relaxed)                           \
-    FN(space_to_batch_quant8_1)                                  \
-    FN(space_to_batch_quant8_2)                                  \
-    FN(space_to_batch_quant8_3)                                  \
-    FN(space_to_batch_relaxed)                                   \
-    FN(space_to_depth_float_1)                                   \
-    FN(space_to_depth_float_1_relaxed)                           \
-    FN(space_to_depth_float_2)                                   \
-    FN(space_to_depth_float_2_relaxed)                           \
-    FN(space_to_depth_float_3)                                   \
-    FN(space_to_depth_float_3_relaxed)                           \
-    FN(space_to_depth_quant8_1)                                  \
-    FN(space_to_depth_quant8_2)                                  \
-    FN(squeeze)                                                  \
-    FN(squeeze_float_1)                                          \
-    FN(squeeze_float_1_relaxed)                                  \
-    FN(squeeze_quant8_1)                                         \
-    FN(squeeze_relaxed)                                          \
-    FN(strided_slice)                                            \
-    FN(strided_slice_float_1)                                    \
-    FN(strided_slice_float_10)                                   \
-    FN(strided_slice_float_10_relaxed)                           \
-    FN(strided_slice_float_11)                                   \
-    FN(strided_slice_float_11_relaxed)                           \
-    FN(strided_slice_float_1_relaxed)                            \
-    FN(strided_slice_float_2)                                    \
-    FN(strided_slice_float_2_relaxed)                            \
-    FN(strided_slice_float_3)                                    \
-    FN(strided_slice_float_3_relaxed)                            \
-    FN(strided_slice_float_4)                                    \
-    FN(strided_slice_float_4_relaxed)                            \
-    FN(strided_slice_float_5)                                    \
-    FN(strided_slice_float_5_relaxed)                            \
-    FN(strided_slice_float_6)                                    \
-    FN(strided_slice_float_6_relaxed)                            \
-    FN(strided_slice_float_7)                                    \
-    FN(strided_slice_float_7_relaxed)                            \
-    FN(strided_slice_float_8)                                    \
-    FN(strided_slice_float_8_relaxed)                            \
-    FN(strided_slice_float_9)                                    \
-    FN(strided_slice_float_9_relaxed)                            \
-    FN(strided_slice_qaunt8_10)                                  \
-    FN(strided_slice_qaunt8_11)                                  \
-    FN(strided_slice_quant8_1)                                   \
-    FN(strided_slice_quant8_2)                                   \
-    FN(strided_slice_quant8_3)                                   \
-    FN(strided_slice_quant8_4)                                   \
-    FN(strided_slice_quant8_5)                                   \
-    FN(strided_slice_quant8_6)                                   \
-    FN(strided_slice_quant8_7)                                   \
-    FN(strided_slice_quant8_8)                                   \
-    FN(strided_slice_quant8_9)                                   \
-    FN(strided_slice_relaxed)                                    \
-    FN(sub)                                                      \
-    FN(sub_broadcast_float)                                      \
-    FN(sub_broadcast_float_relaxed)                              \
-    FN(sub_relaxed)                                              \
-    FN(svdf)                                                     \
-    FN(svdf2)                                                    \
-    FN(svdf2_relaxed)                                            \
-    FN(svdf_relaxed)                                             \
-    FN(svdf_state)                                               \
-    FN(svdf_state_relaxed)                                       \
-    FN(tanh)                                                     \
-    FN(tanh_relaxed)                                             \
-    FN(transpose)                                                \
-    FN(transpose_float_1)                                        \
-    FN(transpose_float_1_relaxed)                                \
-    FN(transpose_quant8_1)                                       \
-    FN(transpose_relaxed)
-
-#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
-    namespace function {                            \
-    extern std::vector<MixedTypedExample> examples; \
-    Model createTestModel();                        \
-    }
-
-FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
-
-#undef FORWARD_DECLARE_GENERATED_OBJECTS
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
-
-#endif  // VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index c1c6e55..e309642 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -128,16 +128,16 @@
 
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 
-static const int32_t invalidOperandTypes[] = {
-    static_cast<int32_t>(OperandType::FLOAT32) - 1,               // lower bound fundamental
-    static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM) + 1,  // upper bound fundamental
-    static_cast<int32_t>(OperandType::OEM) - 1,                   // lower bound OEM
-    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,       // upper bound OEM
+static const uint32_t invalidOperandTypes[] = {
+    static_cast<uint32_t>(OperandTypeRange::OPERAND_FUNDAMENTAL_MIN) - 1,
+    static_cast<uint32_t>(OperandTypeRange::OPERAND_FUNDAMENTAL_MAX) + 1,
+    static_cast<uint32_t>(OperandTypeRange::OPERAND_OEM_MIN) - 1,
+    static_cast<uint32_t>(OperandTypeRange::OPERAND_OEM_MAX) + 1,
 };
 
 static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
-        for (int32_t invalidOperandType : invalidOperandTypes) {
+        for (uint32_t invalidOperandType : invalidOperandTypes) {
             const std::string message = "mutateOperandTypeTest: operand " +
                                         std::to_string(operand) + " set to value " +
                                         std::to_string(invalidOperandType);
@@ -224,8 +224,9 @@
         case OperandType::TENSOR_INT32:
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
-        case OperandType::TENSOR_QUANT16_ASYMM:
             return {-1, 256};
+        case OperandType::TENSOR_QUANT16_ASYMM:
+            return {-32769, 32768};
         default:
             return {};
     }
@@ -291,15 +292,33 @@
     *operand = newOperand;
 }
 
-static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) {
-    // LSH_PROJECTION's second argument is allowed to have any type. This is the
-    // only operation that currently has a type that can be anything independent
-    // from any other type. Changing the operand type to any other type will
-    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
-    // test.
+static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
+    // Do not test OEM types
+    if (type == model.operands[operand].type || type == OperandType::OEM ||
+        type == OperandType::TENSOR_OEM_BYTE) {
+        return true;
+    }
     for (const Operation& operation : model.operations) {
-        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
-            return true;
+        // Skip mutateOperationOperandTypeTest for the following operations.
+        // - LSH_PROJECTION's second argument is allowed to have any type.
+        // - ARGMIN and ARGMAX's first argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM).
+        // - CAST's argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM).
+        switch (operation.type) {
+            case OperationType::LSH_PROJECTION: {
+                if (operand == operation.inputs[1]) {
+                    return true;
+                }
+            } break;
+            case OperationType::CAST:
+            case OperationType::ARGMAX:
+            case OperationType::ARGMIN: {
+                if (type == OperandType::TENSOR_FLOAT32 || type == OperandType::TENSOR_INT32 ||
+                    type == OperandType::TENSOR_QUANT8_ASYMM) {
+                    return true;
+                }
+            } break;
+            default:
+                break;
         }
     }
     return false;
@@ -307,14 +326,8 @@
 
 static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
-        if (mutateOperationOperandTypeSkip(operand, model)) {
-            continue;
-        }
         for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
-            // Do not test OEM types
-            if (invalidOperandType == model.operands[operand].type ||
-                invalidOperandType == OperandType::OEM ||
-                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
+            if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
                 continue;
             }
             const std::string message = "mutateOperationOperandTypeTest: operand " +
@@ -329,16 +342,16 @@
 
 ///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
 
-static const int32_t invalidOperationTypes[] = {
-    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
-    static_cast<int32_t>(OperationType::TRANSPOSE) + 1,      // upper bound fundamental
-    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
-    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+static const uint32_t invalidOperationTypes[] = {
+    static_cast<uint32_t>(OperationTypeRange::OPERATION_FUNDAMENTAL_MIN) - 1,
+    static_cast<uint32_t>(OperationTypeRange::OPERATION_FUNDAMENTAL_MAX) + 1,
+    static_cast<uint32_t>(OperationTypeRange::OPERATION_OEM_MIN) - 1,
+    static_cast<uint32_t>(OperationTypeRange::OPERATION_OEM_MAX) + 1,
 };
 
 static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
-        for (int32_t invalidOperationType : invalidOperationTypes) {
+        for (uint32_t invalidOperationType : invalidOperationTypes) {
             const std::string message = "mutateOperationTypeTest: operation " +
                                         std::to_string(operation) + " set to value " +
                                         std::to_string(invalidOperationType);
@@ -406,8 +419,26 @@
     removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
 }
 
+static bool removeOperandSkip(size_t operand, const Model& model) {
+    for (const Operation& operation : model.operations) {
+        // Skip removeOperandTest for the following operations.
+        // - SPLIT's outputs are not checked during prepareModel.
+        if (operation.type == OperationType::SPLIT) {
+            for (const size_t outOperand : operation.outputs) {
+                if (operand == outOperand) {
+                    return true;
+                }
+            }
+        }
+    }
+    return false;
+}
+
 static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        if (removeOperandSkip(operand, model)) {
+            continue;
+        }
         const std::string message = "removeOperandTest: operand " + std::to_string(operand);
         validate(device, message, model,
                  [operand](Model* model) { removeOperand(model, operand); });
@@ -433,15 +464,76 @@
 
 ///////////////////////// REMOVE OPERATION INPUT /////////////////////////
 
+static bool removeOperationInputSkip(const Operation& op, size_t input) {
+    // Skip removeOperationInputTest for the following operations.
+    // - CONCATENATION has at least 2 inputs, with the last element being INT32.
+    // - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
+    //   SPACE_TO_DEPTH, DEPTH_TO_SPACE, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
+    //   layout parameter.
+    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
+    //   parameter.
+    switch (op.type) {
+        case OperationType::CONCATENATION: {
+            if (op.inputs.size() > 2 && input != op.inputs.size() - 1) {
+                return true;
+            }
+        } break;
+        case OperationType::DEPTHWISE_CONV_2D: {
+            if ((op.inputs.size() == 12 && input == 11) || (op.inputs.size() == 9 && input == 8)) {
+                return true;
+            }
+        } break;
+        case OperationType::CONV_2D:
+        case OperationType::AVERAGE_POOL_2D:
+        case OperationType::MAX_POOL_2D:
+        case OperationType::L2_POOL_2D: {
+            if ((op.inputs.size() == 11 && input == 10) || (op.inputs.size() == 8 && input == 7)) {
+                return true;
+            }
+        } break;
+        case OperationType::RESIZE_BILINEAR: {
+            if (op.inputs.size() == 4 && input == 3) {
+                return true;
+            }
+        } break;
+        case OperationType::SPACE_TO_DEPTH:
+        case OperationType::DEPTH_TO_SPACE:
+        case OperationType::BATCH_TO_SPACE_ND: {
+            if (op.inputs.size() == 3 && input == 2) {
+                return true;
+            }
+        } break;
+        case OperationType::SPACE_TO_BATCH_ND: {
+            if (op.inputs.size() == 4 && input == 3) {
+                return true;
+            }
+        } break;
+        case OperationType::L2_NORMALIZATION: {
+            if (op.inputs.size() == 2 && input == 1) {
+                return true;
+            }
+        } break;
+        case OperationType::LOCAL_RESPONSE_NORMALIZATION: {
+            if (op.inputs.size() == 6 && input == 5) {
+                return true;
+            }
+        } break;
+        case OperationType::SOFTMAX: {
+            if (op.inputs.size() == 3 && input == 2) {
+                return true;
+            }
+        } break;
+        default:
+            break;
+    }
+    return false;
+}
+
 static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
             const Operation& op = model.operations[operation];
-            // CONCATENATION has at least 2 inputs, with the last element being
-            // INT32. Skip this test if removing one of CONCATENATION's
-            // inputs still produces a valid model.
-            if (op.type == OperationType::CONCATENATION && op.inputs.size() > 2 &&
-                input != op.inputs.size() - 1) {
+            if (removeOperationInputSkip(op, input)) {
                 continue;
             }
             const std::string message = "removeOperationInputTest: operation " +
@@ -479,12 +571,13 @@
 
 ///////////////////////// ADD OPERATION INPUT /////////////////////////
 
-static bool addOperationInputSkip(const Operation& operation) {
+static bool addOperationInputSkip(const Operation& op) {
     // Skip addOperationInputTest for the following operations.
-    // L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis parameter.
-    if (operation.type == OperationType::L2_NORMALIZATION ||
-        operation.type == OperationType::LOCAL_RESPONSE_NORMALIZATION ||
-        operation.type == OperationType::SOFTMAX) {
+    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional INT32 axis
+    //   parameter.
+    if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
+        (op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
+        (op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
         return true;
     }
     return false;
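
For reference, all of these skip predicates follow the same shape: given an operation and a candidate input index, decide whether the mutation under test (removing the input, retyping the operand, adding an extra input) could still produce a valid model, typically because the input in question is an optional trailing parameter. The snippet below is a minimal standalone sketch of that rule; the OpKind enum and isOptionalTrailingInput() helper are hypothetical stand-ins for the real OperationType/Model types, purely to illustrate the arity table.

    // Standalone sketch (not the real HAL types): models the "optional trailing
    // parameter" rule that removeOperationInputSkip() above encodes per operation.
    #include <cstddef>
    #include <iostream>

    enum class OpKind { Conv2d, DepthwiseConv2d, Softmax, Other };  // hypothetical

    // True when `input` is the optional trailing parameter of `kind`, i.e. removing
    // it still leaves a well-formed signature, so the negative test must be skipped.
    bool isOptionalTrailingInput(OpKind kind, size_t numInputs, size_t input) {
        switch (kind) {
            case OpKind::Conv2d:
                // Explicit-padding form has 11 inputs, implicit form has 8; the last
                // one is the optional data-layout flag.
                return (numInputs == 11 && input == 10) || (numInputs == 8 && input == 7);
            case OpKind::DepthwiseConv2d:
                return (numInputs == 12 && input == 11) || (numInputs == 9 && input == 8);
            case OpKind::Softmax:
                // Optional INT32 axis as the third input.
                return numInputs == 3 && input == 2;
            default:
                return false;
        }
    }

    int main() {
        // A CONV_2D with the optional layout flag present: only input 10 is skippable.
        for (size_t i = 0; i < 11; ++i) {
            std::cout << "input " << i
                      << (isOptionalTrailingInput(OpKind::Conv2d, 11, i) ? ": skip"
                                                                         : ": test removal")
                      << "\n";
        }
        return 0;
    }
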
diff --git a/neuralnetworks/1.2/vts/functional/ValidationTests.cpp b/neuralnetworks/1.2/vts/functional/ValidationTests.cpp
deleted file mode 100644
index 3bdc5cd..0000000
--- a/neuralnetworks/1.2/vts/functional/ValidationTests.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "Models.h"
-#include "VtsHalNeuralnetworks.h"
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
-
-// forward declarations
-std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
-
-// generate validation tests
-#define VTS_CURRENT_TEST_CASE(TestName)                                           \
-    TEST_F(ValidationTest, TestName) {                                            \
-        const Model model = TestName::createTestModel();                          \
-        const std::vector<Request> requests = createRequests(TestName::examples); \
-        validateModel(model);                                                     \
-        validateRequests(model, requests);                                        \
-    }
-
-FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
-
-#undef VTS_CURRENT_TEST_CASE
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
diff --git a/power/stats/1.0/default/Android.bp b/power/stats/1.0/default/Android.bp
new file mode 100644
index 0000000..04270c1
--- /dev/null
+++ b/power/stats/1.0/default/Android.bp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+
+cc_binary {
+    name: "android.hardware.power.stats@1.0-service",
+    relative_install_path: "hw",
+    init_rc: ["android.hardware.power.stats@1.0-service.rc"],
+    srcs: ["service.cpp", "PowerStats.cpp"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "libfmq",
+        "libhidlbase",
+        "libhidltransport",
+        "liblog",
+        "libutils",
+        "android.hardware.power.stats@1.0",
+    ],
+    vendor: true,
+}
diff --git a/power/stats/1.0/default/PowerStats.cpp b/power/stats/1.0/default/PowerStats.cpp
new file mode 100644
index 0000000..810c575
--- /dev/null
+++ b/power/stats/1.0/default/PowerStats.cpp
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PowerStats.h"
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <exception>
+#include <thread>
+
+namespace android {
+namespace hardware {
+namespace power {
+namespace stats {
+namespace V1_0 {
+namespace implementation {
+
+#define MAX_FILE_PATH_LEN 128
+#define MAX_DEVICE_NAME_LEN 64
+#define MAX_QUEUE_SIZE 8192
+
+constexpr char kIioDirRoot[] = "/sys/bus/iio/devices/";
+constexpr char kDeviceName[] = "pm_device_name";
+constexpr char kDeviceType[] = "iio:device";
+constexpr uint32_t MAX_SAMPLING_RATE = 10;
+constexpr uint64_t WRITE_TIMEOUT_NS = 1000000000;
+
+void PowerStats::findIioPowerMonitorNodes() {
+    struct dirent* ent;
+    int fd;
+    char devName[MAX_DEVICE_NAME_LEN];
+    char filePath[MAX_FILE_PATH_LEN];
+    DIR* iioDir = opendir(kIioDirRoot);
+    if (!iioDir) {
+        ALOGE("Error opening directory: %s", kIioDirRoot);
+        return;
+    }
+    while (ent = readdir(iioDir), ent) {
+        if (strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0 &&
+            strlen(ent->d_name) > strlen(kDeviceType) &&
+            strncmp(ent->d_name, kDeviceType, strlen(kDeviceType)) == 0) {
+            snprintf(filePath, MAX_FILE_PATH_LEN, "%s/%s", ent->d_name, "name");
+            fd = openat(dirfd(iioDir), filePath, O_RDONLY);
+            if (fd < 0) {
+                ALOGW("Failed to open directory: %s", filePath);
+                continue;
+            }
+            if (read(fd, devName, MAX_DEVICE_NAME_LEN) < 0) {
+                ALOGW("Failed to read device name from file: %s(%d)", filePath, fd);
+                close(fd);
+                continue;
+            }
+
+            if (strncmp(devName, kDeviceName, strlen(kDeviceName)) == 0) {
+                snprintf(filePath, MAX_FILE_PATH_LEN, "%s/%s", kIioDirRoot, ent->d_name);
+                mPm.devicePaths.push_back(filePath);
+            }
+            close(fd);
+        }
+    }
+    closedir(iioDir);
+    return;
+}
+
+size_t PowerStats::parsePowerRails() {
+    std::string data;
+    std::string railFileName;
+    std::string spsFileName;
+    uint32_t index = 0;
+    uint32_t samplingRate;
+    for (const auto& path : mPm.devicePaths) {
+        railFileName = path + "/enabled_rails";
+        spsFileName = path + "/sampling_rate";
+        if (!android::base::ReadFileToString(spsFileName, &data)) {
+            ALOGW("Error reading file: %s", spsFileName.c_str());
+            continue;
+        }
+        samplingRate = strtoul(data.c_str(), NULL, 10);
+        if (!samplingRate || samplingRate == ULONG_MAX) {
+            ALOGE("Error parsing: %s", spsFileName.c_str());
+            break;
+        }
+        if (!android::base::ReadFileToString(railFileName, &data)) {
+            ALOGW("Error reading file: %s", railFileName.c_str());
+            continue;
+        }
+        std::istringstream railNames(data);
+        std::string line;
+        while (std::getline(railNames, line)) {
+            std::vector<std::string> words = android::base::Split(line, ":");
+            if (words.size() == 2) {
+                mPm.railsInfo.emplace(words[0], RailData{.devicePath = path,
+                                                         .index = index,
+                                                         .subsysName = words[1],
+                                                         .samplingRate = samplingRate});
+                index++;
+            } else {
+                ALOGW("Unexpected format in file: %s", railFileName.c_str());
+            }
+        }
+    }
+    return index;
+}
+
+int PowerStats::parseIioEnergyNode(std::string devName) {
+    int ret = 0;
+    std::string data;
+    std::string fileName = devName + "/energy_value";
+    if (!android::base::ReadFileToString(fileName, &data)) {
+        ALOGE("Error reading file: %s", fileName.c_str());
+        return -1;
+    }
+
+    std::istringstream energyData(data);
+    std::string line;
+    uint64_t timestamp = 0;
+    bool timestampRead = false;
+    while (std::getline(energyData, line)) {
+        std::vector<std::string> words = android::base::Split(line, ",");
+        if (timestampRead == false) {
+            if (words.size() == 1) {
+                timestamp = strtoull(words[0].c_str(), NULL, 10);
+                if (timestamp == 0 || timestamp == ULLONG_MAX) {
+                    ALOGW("Potentially wrong timestamp: %" PRIu64, timestamp);
+                }
+                timestampRead = true;
+            }
+        } else if (words.size() == 2) {
+            std::string railName = words[0];
+            if (mPm.railsInfo.count(railName) != 0) {
+                size_t index = mPm.railsInfo[railName].index;
+                mPm.reading[index].index = index;
+                mPm.reading[index].timestamp = timestamp;
+                mPm.reading[index].energy = strtoull(words[1].c_str(), NULL, 10);
+                if (mPm.reading[index].energy == ULLONG_MAX) {
+                    ALOGW("Potentially wrong energy value: %" PRIu64, mPm.reading[index].energy);
+                }
+            }
+        } else {
+            ALOGW("Unexpected format in file: %s", fileName.c_str());
+            ret = -1;
+            break;
+        }
+    }
+    return ret;
+}
+
+Status PowerStats::parseIioEnergyNodes() {
+    Status ret = Status::SUCCESS;
+    if (mPm.hwEnabled == false) {
+        return Status::NOT_SUPPORTED;
+    }
+
+    for (const auto& devicePath : mPm.devicePaths) {
+        if (parseIioEnergyNode(devicePath) < 0) {
+            ALOGE("Error in parsing power stats");
+            ret = Status::FILESYSTEM_ERROR;
+            break;
+        }
+    }
+    return ret;
+}
+
+PowerStats::PowerStats() {
+    findIioPowerMonitorNodes();
+    size_t numRails = parsePowerRails();
+    if (mPm.devicePaths.empty() || numRails == 0) {
+        mPm.hwEnabled = false;
+    } else {
+        mPm.hwEnabled = true;
+        mPm.reading.resize(numRails);
+    }
+}
+
+Return<void> PowerStats::getRailInfo(getRailInfo_cb _hidl_cb) {
+    hidl_vec<RailInfo> rInfo;
+    Status ret = Status::SUCCESS;
+    size_t index;
+    std::lock_guard<std::mutex> _lock(mPm.mLock);
+    if (mPm.hwEnabled == false) {
+        _hidl_cb(rInfo, Status::NOT_SUPPORTED);
+        return Void();
+    }
+    rInfo.resize(mPm.railsInfo.size());
+    for (const auto& railData : mPm.railsInfo) {
+        index = railData.second.index;
+        rInfo[index].railName = railData.first;
+        rInfo[index].subsysName = railData.second.subsysName;
+        rInfo[index].index = index;
+        rInfo[index].samplingRate = railData.second.samplingRate;
+    }
+    _hidl_cb(rInfo, ret);
+    return Void();
+}
+
+Return<void> PowerStats::getEnergyData(const hidl_vec<uint32_t>& railIndices,
+                                       getEnergyData_cb _hidl_cb) {
+    hidl_vec<EnergyData> eVal;
+    std::lock_guard<std::mutex> _lock(mPm.mLock);
+    Status ret = parseIioEnergyNodes();
+
+    if (ret != Status::SUCCESS) {
+        ALOGE("Failed to getEnergyData");
+        _hidl_cb(eVal, ret);
+        return Void();
+    }
+
+    if (railIndices.size() == 0) {
+        eVal.resize(mPm.railsInfo.size());
+        memcpy(&eVal[0], &mPm.reading[0], mPm.reading.size() * sizeof(EnergyData));
+    } else {
+        eVal.resize(railIndices.size());
+        int i = 0;
+        for (const auto& railIndex : railIndices) {
+            if (railIndex >= mPm.reading.size()) {
+                ret = Status::INVALID_INPUT;
+                eVal.resize(0);
+                break;
+            }
+            memcpy(&eVal[i], &mPm.reading[railIndex], sizeof(EnergyData));
+            i++;
+        }
+    }
+    _hidl_cb(eVal, ret);
+    return Void();
+}
+
+Return<void> PowerStats::streamEnergyData(uint32_t timeMs, uint32_t samplingRate,
+                                          streamEnergyData_cb _hidl_cb) {
+    std::lock_guard<std::mutex> _lock(mPm.mLock);
+    if (mPm.fmqSynchronized != nullptr) {
+        _hidl_cb(MessageQueueSync::Descriptor(), 0, 0, Status::INSUFFICIENT_RESOURCES);
+        return Void();
+    }
+    uint32_t sps = std::min(samplingRate, MAX_SAMPLING_RATE);
+    uint32_t numSamples = timeMs * sps / 1000;
+    mPm.fmqSynchronized.reset(new (std::nothrow) MessageQueueSync(MAX_QUEUE_SIZE, true));
+    if (mPm.fmqSynchronized == nullptr || mPm.fmqSynchronized->isValid() == false) {
+        mPm.fmqSynchronized = nullptr;
+        _hidl_cb(MessageQueueSync::Descriptor(), 0, 0, Status::INSUFFICIENT_RESOURCES);
+        return Void();
+    }
+    std::thread pollThread = std::thread([this, sps, numSamples]() {
+        uint64_t sleepTimeUs = 1000000 / sps;
+        uint32_t currSamples = 0;
+        while (currSamples < numSamples) {
+            mPm.mLock.lock();
+            if (parseIioEnergyNodes() == Status::SUCCESS) {
+                mPm.fmqSynchronized->writeBlocking(&mPm.reading[0], mPm.reading.size(),
+                                                   WRITE_TIMEOUT_NS);
+                mPm.mLock.unlock();
+                currSamples++;
+                if (usleep(sleepTimeUs) < 0) {
+                    ALOGW("Sleep interrupted");
+                    break;
+                }
+            } else {
+                mPm.mLock.unlock();
+                break;
+            }
+        }
+        mPm.mLock.lock();
+        mPm.fmqSynchronized = nullptr;
+        mPm.mLock.unlock();
+        return;
+    });
+    pollThread.detach();
+    _hidl_cb(*(mPm.fmqSynchronized)->getDesc(), numSamples, mPm.reading.size(), Status::SUCCESS);
+    return Void();
+}
+
+Return<void> PowerStats::getPowerEntityInfo(getPowerEntityInfo_cb _hidl_cb) {
+    hidl_vec<PowerEntityInfo> eInfo;
+    _hidl_cb(eInfo, Status::NOT_SUPPORTED);
+    return Void();
+}
+
+Return<void> PowerStats::getPowerEntityStateInfo(const hidl_vec<uint32_t>& powerEntityIds,
+                                                 getPowerEntityStateInfo_cb _hidl_cb) {
+    (void)powerEntityIds;
+    hidl_vec<PowerEntityStateSpace> powerEntityStateSpaces;
+    _hidl_cb(powerEntityStateSpaces, Status::NOT_SUPPORTED);
+    return Void();
+}
+
+Return<void> PowerStats::getPowerEntityStateResidencyData(
+    const hidl_vec<uint32_t>& powerEntityIds, getPowerEntityStateResidencyData_cb _hidl_cb) {
+    (void)powerEntityIds;
+    hidl_vec<PowerEntityStateResidencyResult> results;
+    _hidl_cb(results, Status::NOT_SUPPORTED);
+    return Void();
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace stats
+}  // namespace power
+}  // namespace hardware
+}  // namespace android
diff --git a/power/stats/1.0/default/PowerStats.h b/power/stats/1.0/default/PowerStats.h
new file mode 100644
index 0000000..fb2c6a8
--- /dev/null
+++ b/power/stats/1.0/default/PowerStats.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_POWERSTATS_V1_0_POWERSTATS_H
+#define ANDROID_HARDWARE_POWERSTATS_V1_0_POWERSTATS_H
+
+#include <android/hardware/power/stats/1.0/IPowerStats.h>
+#include <fmq/MessageQueue.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace power {
+namespace stats {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::hidl_vec;
+using ::android::hardware::kSynchronizedReadWrite;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::power::stats::V1_0::EnergyData;
+using ::android::hardware::power::stats::V1_0::PowerEntityInfo;
+using ::android::hardware::power::stats::V1_0::PowerEntityStateInfo;
+using ::android::hardware::power::stats::V1_0::PowerEntityStateResidencyData;
+using ::android::hardware::power::stats::V1_0::PowerEntityStateResidencyResult;
+using ::android::hardware::power::stats::V1_0::PowerEntityStateSpace;
+using ::android::hardware::power::stats::V1_0::PowerEntityType;
+using ::android::hardware::power::stats::V1_0::RailInfo;
+using ::android::hardware::power::stats::V1_0::Status;
+
+typedef MessageQueue<EnergyData, kSynchronizedReadWrite> MessageQueueSync;
+struct RailData {
+    std::string devicePath;
+    uint32_t index;
+    std::string subsysName;
+    uint32_t samplingRate;
+};
+
+struct OnDeviceMmt {
+    std::mutex mLock;
+    bool hwEnabled;
+    std::vector<std::string> devicePaths;
+    std::map<std::string, RailData> railsInfo;
+    std::vector<EnergyData> reading;
+    std::unique_ptr<MessageQueueSync> fmqSynchronized;
+};
+
+struct PowerStats : public IPowerStats {
+    PowerStats();
+    // Methods from ::android::hardware::power::stats::V1_0::IPowerStats follow.
+    Return<void> getRailInfo(getRailInfo_cb _hidl_cb) override;
+    Return<void> getEnergyData(const hidl_vec<uint32_t>& railIndices,
+                               getEnergyData_cb _hidl_cb) override;
+    Return<void> streamEnergyData(uint32_t timeMs, uint32_t samplingRate,
+                                  streamEnergyData_cb _hidl_cb) override;
+    Return<void> getPowerEntityInfo(getPowerEntityInfo_cb _hidl_cb) override;
+    Return<void> getPowerEntityStateInfo(const hidl_vec<uint32_t>& powerEntityIds,
+                                         getPowerEntityStateInfo_cb _hidl_cb) override;
+    Return<void> getPowerEntityStateResidencyData(
+        const hidl_vec<uint32_t>& powerEntityIds,
+        getPowerEntityStateResidencyData_cb _hidl_cb) override;
+
+   private:
+    OnDeviceMmt mPm;
+    void findIioPowerMonitorNodes();
+    size_t parsePowerRails();
+    int parseIioEnergyNode(std::string devName);
+    Status parseIioEnergyNodes();
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace stats
+}  // namespace power
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_POWERSTATS_V1_0_POWERSTATS_H
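
The non-streaming path pairs getRailInfo() with getEnergyData(): the first call maps rail names and subsystems to indices, and the second returns the latest accumulated EnergyData for those indices (or for every rail when the index list is empty). A hedged sketch of that pairing follows; the dumpEnergyForSubsystem() helper and its printf formatting are illustrative only, and no unit conversion is attempted.

    // Client-side sketch, assuming the generated android.hardware.power.stats@1.0 headers.
    #include <android/hardware/power/stats/1.0/IPowerStats.h>

    #include <cinttypes>
    #include <cstdio>
    #include <string>
    #include <vector>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    using ::android::hardware::power::stats::V1_0::EnergyData;
    using ::android::hardware::power::stats::V1_0::IPowerStats;
    using ::android::hardware::power::stats::V1_0::RailInfo;
    using ::android::hardware::power::stats::V1_0::Status;

    // Prints the accumulated energy of every rail belonging to `subsys`.
    void dumpEnergyForSubsystem(const sp<IPowerStats>& hal, const std::string& subsys) {
        std::vector<uint32_t> indices;
        hal->getRailInfo([&](const hidl_vec<RailInfo>& rails, Status status) {
            if (status != Status::SUCCESS) return;
            for (const RailInfo& rail : rails) {
                if (rail.subsysName == subsys.c_str()) indices.push_back(rail.index);
            }
        });
        if (indices.empty()) return;

        hidl_vec<uint32_t> wanted = indices;  // getEnergyData takes the rail indices
        hal->getEnergyData(wanted, [](const hidl_vec<EnergyData>& data, Status status) {
            if (status != Status::SUCCESS) return;
            for (const EnergyData& e : data) {
                printf("rail %u: energy=%" PRIu64 " timestamp=%" PRIu64 "\n", e.index,
                       e.energy, e.timestamp);
            }
        });
    }
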
diff --git a/power/stats/1.0/default/android.hardware.power.stats@1.0-service.rc b/power/stats/1.0/default/android.hardware.power.stats@1.0-service.rc
new file mode 100644
index 0000000..d7e546b
--- /dev/null
+++ b/power/stats/1.0/default/android.hardware.power.stats@1.0-service.rc
@@ -0,0 +1,4 @@
+service vendor.power.stats-hal-1-0 /vendor/bin/hw/android.hardware.power.stats@1.0-service
+    class hal
+    user system
+    group system
diff --git a/power/stats/1.0/default/service.cpp b/power/stats/1.0/default/service.cpp
new file mode 100644
index 0000000..80649f5
--- /dev/null
+++ b/power/stats/1.0/default/service.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.power.stats@1.0-service"
+
+#include <android/log.h>
+#include <hidl/HidlTransportSupport.h>
+
+#include "PowerStats.h"
+
+using android::OK;
+using android::sp;
+using android::status_t;
+
+// libhwbinder:
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+
+// Generated HIDL files
+using android::hardware::power::stats::V1_0::IPowerStats;
+using android::hardware::power::stats::V1_0::implementation::PowerStats;
+
+int main(int /* argc */, char** /* argv */) {
+    ALOGI("power.stats service 1.0 is starting.");
+
+    android::sp<IPowerStats> service = new PowerStats();
+    if (service == nullptr) {
+        ALOGE("Can not create an instance of power.stats HAL Iface, exiting.");
+        return 1;
+    }
+
+    configureRpcThreadpool(1, true /*callerWillJoin*/);
+
+    status_t status = service->registerAsService();
+    if (status != OK) {
+        ALOGE("Could not register service for power.stats HAL Iface (%d), exiting.", status);
+        return 1;
+    }
+
+    ALOGI("power.stats service is ready");
+    joinRpcThreadpool();
+
+    // In normal operation, we don't expect the thread pool to exit
+    ALOGE("power.stats service is shutting down");
+    return 1;
+}
diff --git a/radio/1.3/Android.bp b/radio/1.3/Android.bp
index b6610e0..3c65180 100644
--- a/radio/1.3/Android.bp
+++ b/radio/1.3/Android.bp
@@ -21,12 +21,16 @@
     ],
     types: [
         "AccessNetwork",
+        "CellConfigLte",
+        "CellInfo",
+        "CellInfoLte",
         "DataProfileInfo",
         "DataRegStateResult",
         "EmergencyNumber",
         "EmergencyNumberSource",
         "EmergencyServiceCategory",
         "LteVopsInfo",
+        "NetworkScanResult",
     ],
     gen_java: true,
 }
diff --git a/radio/1.3/IRadioIndication.hal b/radio/1.3/IRadioIndication.hal
index 509eef8..e7f26ac 100644
--- a/radio/1.3/IRadioIndication.hal
+++ b/radio/1.3/IRadioIndication.hal
@@ -49,4 +49,17 @@
      */
     oneway currentEmergencyNumberList(RadioIndicationType type,
             vec<EmergencyNumber> emergencyNumberList);
+
+    /**
+     * Request all of the current cell information known to the radio.
+     *
+     * @param type Type of radio indication
+     * @param records Current cell information
+     */
+    oneway cellInfoList_1_3(RadioIndicationType type, vec<CellInfo> records);
+
+    /**
+     * Incremental network scan results
+     */
+    oneway networkScanResult_1_3(RadioIndicationType type, NetworkScanResult result);
 };
diff --git a/radio/1.3/IRadioResponse.hal b/radio/1.3/IRadioResponse.hal
index 10e7d63..75d1501 100644
--- a/radio/1.3/IRadioResponse.hal
+++ b/radio/1.3/IRadioResponse.hal
@@ -50,6 +50,17 @@
 
     /**
      * @param info Response info struct containing response type, serial no. and error
+     * @param cellInfo List of current cell information known to radio
+     *
+     * Valid errors returned:
+     *   RadioError:NONE
+     *   RadioError:RADIO_NOT_AVAILABLE
+     *   RadioError:INTERNAL_ERR
+     */
+    oneway getCellInfoListResponse_1_3(RadioResponseInfo info, vec<CellInfo> cellInfo);
+
+    /**
+     * @param info Response info struct containing response type, serial no. and error
      * @param dataRegResponse Current Data registration response as defined by DataRegStateResult in
      *        types.hal
      *
diff --git a/radio/1.3/types.hal b/radio/1.3/types.hal
index 8b0891c..b161e0e 100644
--- a/radio/1.3/types.hal
+++ b/radio/1.3/types.hal
@@ -18,11 +18,21 @@
 
 import @1.0::ApnAuthType;
 import @1.0::ApnTypes;
+import @1.0::CellInfoType;
 import @1.0::DataProfileId;
 import @1.0::DataProfileInfoType;
 import @1.0::RadioAccessFamily;
+import @1.0::RadioError;
 import @1.0::RegState;
+import @1.0::TimeStampType;
+import @1.1::ScanStatus;
 import @1.2::AccessNetwork;
+import @1.2::CellInfo;
+import @1.2::CellInfoCdma;
+import @1.2::CellInfoGsm;
+import @1.2::CellInfoLte;
+import @1.2::CellInfoTdscdma;
+import @1.2::CellInfoWcdma;
 import @1.2::CellIdentity;
 import @1.2::DataRegStateResult;
 
@@ -168,6 +178,78 @@
 
         LteVopsInfo lteVopsInfo; // LTE network capability
     } vopsInfo;
+
+    /**
+     * True if use of dual connectivity with NR is restricted.
+     * Reference: 3GPP TS 24.301 v15.03 section 9.3.3.12A.
+     */
+    bool isDcNrRestricted;
+
+    /**
+     * True if the bit N in the PLMN-InfoList-r15 is true and the selected PLMN is present in
+     * plmn-IdentityList at position N.
+     * Reference: 3GPP TS 36.331 v15.2.2 section 6.3.1 PLMN-InfoList-r15.
+     *            3GPP TS 36.331 v15.2.2 section 6.2.2 SystemInformationBlockType1 message.
+     */
+    bool isNrAvailable;
+};
+
+/** Contains the configuration of the LTE cell tower. */
+struct CellConfigLte {
+    /**
+     * Indicates whether E-UTRA-NR Dual Connectivity (EN-DC) is supported by the LTE cell.
+     *
+     * True if the plmn-InfoList-r15 is present in SIB2 and at least one bit in this list is true,
+     * otherwise this value should be false.
+     *
+     * Reference: 3GPP TS 36.331 v15.2.2 6.3.1 System information blocks.
+     */
+    bool isEndcAvailable;
+};
+
+/** Inherits from @1.2::CellInfoLte, in order to add the LTE configuration. */
+struct CellInfoLte {
+    @1.2::CellInfoLte base;
+    CellConfigLte cellConfig;
+};
+
+/** Overwritten from @1.2::CellInfo in order to update the CellInfoLte to 1.3 version. */
+struct CellInfo {
+    /** Cell type for selecting from union CellInfo. */
+    CellInfoType cellInfoType;
+
+    /**
+     * True if the phone is registered to a mobile network that provides service on this cell and
+     * this cell is being used or would be used for network signaling.
+     */
+    bool isRegistered;
+
+    /** CellInfo details, cellInfoType can tell which cell info should be used. */
+    safe_union Info {
+        CellInfoGsm gsm;
+        CellInfoCdma cdma;
+        CellInfoWcdma wcdma;
+        CellInfoTdscdma tdscdma;
+        CellInfoLte lte;
+    } info;
+};
+
+/** Overwritten from @1.2::NetworkScanResult in order to update the CellInfo to 1.3 version. */
+struct NetworkScanResult {
+    /**
+     * The status of the scan.
+     */
+    ScanStatus status;
+
+    /**
+     * The error code of the incremental result.
+     */
+    RadioError error;
+
+    /**
+     * List of network information as CellInfo.
+     */
+    vec<CellInfo> networkInfos;
 };
 
 /**
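
Since the new @1.3::CellInfo carries its per-RAT payload in a safe_union, clients read the generated discriminator before touching a member instead of trusting cellInfoType alone. A hedged sketch of pulling the new EN-DC bit out of an indication payload, assuming the headers generated from this types.hal (the anyEndcAvailable() helper is illustrative):

    // Sketch, assuming the generated android.hardware.radio@1.3 headers.
    #include <android/hardware/radio/1.3/types.h>

    using ::android::hardware::hidl_vec;
    using ::android::hardware::radio::V1_3::CellInfo;

    // True if any registered LTE cell in `records` reports EN-DC support.
    bool anyEndcAvailable(const hidl_vec<CellInfo>& records) {
        for (const CellInfo& cell : records) {
            if (!cell.isRegistered) continue;
            // Only read .lte() when the safe_union's active member really is lte.
            if (cell.info.getDiscriminator() == CellInfo::Info::hidl_discriminator::lte &&
                cell.info.lte().cellConfig.isEndcAvailable) {
                return true;
            }
        }
        return false;
    }
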
diff --git a/radio/config/1.1/Android.bp b/radio/config/1.1/Android.bp
new file mode 100644
index 0000000..8dc0f27
--- /dev/null
+++ b/radio/config/1.1/Android.bp
@@ -0,0 +1,23 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.radio.config@1.1",
+    root: "android.hardware",
+    vndk: {
+        enabled: true,
+    },
+    srcs: [
+        "types.hal",
+        "IRadioConfigIndication.hal",
+        "IRadioConfigResponse.hal",
+    ],
+    interfaces: [
+        "android.hardware.radio@1.0",
+        "android.hardware.radio.config@1.0",
+        "android.hidl.base@1.0",
+    ],
+    types: [
+        "SimSlotStatus",
+    ],
+    gen_java: true,
+}
diff --git a/radio/config/1.1/IRadioConfigIndication.hal b/radio/config/1.1/IRadioConfigIndication.hal
new file mode 100644
index 0000000..53eaa4d
--- /dev/null
+++ b/radio/config/1.1/IRadioConfigIndication.hal
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.1;
+
+import @1.0::IRadioConfigIndication;
+import android.hardware.radio@1.0::RadioIndicationType;
+
+/**
+ * Interface declaring unsolicited radio config indications.
+ */
+interface IRadioConfigIndication extends @1.0::IRadioConfigIndication {
+
+    /**
+     * Indicates SIM slot status change.
+     *
+ * This indication must be sent by the modem whenever there is any slot status change, even if the
+     * slot is inactive. For example, this indication must be triggered if a SIM card is inserted
+     * into an inactive slot.
+     *
+     * @param type Type of radio indication
+     * @param slotStatus new slot status info with size equal to the number of physical slots on
+     *        the device
+     */
+    oneway simSlotsStatusChanged_1_1(RadioIndicationType type, vec<SimSlotStatus> slotStatus);
+};
diff --git a/radio/config/1.1/IRadioConfigResponse.hal b/radio/config/1.1/IRadioConfigResponse.hal
new file mode 100644
index 0000000..6f543ab
--- /dev/null
+++ b/radio/config/1.1/IRadioConfigResponse.hal
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.1;
+
+import android.hardware.radio@1.0::RadioResponseInfo;
+import @1.0::IRadioConfigResponse;
+import @1.1::SimSlotStatus;
+
+/**
+ * Interface declaring response functions to solicited radio config requests.
+ */
+interface IRadioConfigResponse extends @1.0::IRadioConfigResponse {
+
+    /**
+     * @param info Response info struct containing response type, serial no. and error
+     * @param slotStatus Sim slot struct containing all the physical SIM slots info with size
+     *        equal to the number of physical slots on the device
+     *
+     * Valid errors returned:
+     *   RadioError:NONE
+     *   RadioError:RADIO_NOT_AVAILABLE
+     *   RadioError:NO_MEMORY
+     *   RadioError:INTERNAL_ERR
+     *   RadioError:MODEM_ERR
+     */
+    oneway getSimSlotsStatusResponse_1_1(RadioResponseInfo info, vec<SimSlotStatus> slotStatus);
+};
diff --git a/radio/config/1.1/types.hal b/radio/config/1.1/types.hal
new file mode 100644
index 0000000..0c9d11e
--- /dev/null
+++ b/radio/config/1.1/types.hal
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.1;
+
+import android.hardware.radio@1.0::CardState;
+import @1.0::SimSlotStatus;
+
+struct SimSlotStatus {
+    @1.0::SimSlotStatus base;
+    /**
+     * The EID is the eUICC identifier. The EID shall be stored within the ECASD and can be
+     * retrieved by the Device at any time using the standard GlobalPlatform GET DATA command.
+     *
+     * This data is mandatory and applicable only when cardState is CardState:PRESENT and SIM card
+     * supports eUICC.
+     */
+    string eid;
+};
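
The 1.1 SimSlotStatus simply wraps the 1.0 struct and appends the eUICC identifier, so vendor code upgrades by filling base and then eid. A small sketch under that assumption (the upgradeSlotStatus() helper is illustrative):

    // Sketch, assuming the generated android.hardware.radio.config@1.1 headers.
    #include <android/hardware/radio/config/1.1/types.h>

    #include <string>

    using ::android::hardware::radio::V1_0::CardState;
    using V1_0_SlotStatus = ::android::hardware::radio::config::V1_0::SimSlotStatus;
    using V1_1_SlotStatus = ::android::hardware::radio::config::V1_1::SimSlotStatus;

    // Builds the 1.1 slot status from an existing 1.0 one plus the card's EID.
    V1_1_SlotStatus upgradeSlotStatus(const V1_0_SlotStatus& legacy, const std::string& eid) {
        V1_1_SlotStatus status;
        status.base = legacy;
        // The EID is only meaningful when an eUICC-capable card is present.
        status.eid = (legacy.cardState == CardState::PRESENT) ? eid : "";
        return status;
    }
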
diff --git a/soundtrigger/2.2/ISoundTriggerHw.hal b/soundtrigger/2.2/ISoundTriggerHw.hal
index fcb5087..a26896a 100644
--- a/soundtrigger/2.2/ISoundTriggerHw.hal
+++ b/soundtrigger/2.2/ISoundTriggerHw.hal
@@ -16,25 +16,27 @@
 
 package android.hardware.soundtrigger@2.2;
 
-import @2.0::ISoundTriggerHwCallback.RecognitionEvent;
 import @2.0::SoundModelHandle;
 import @2.1::ISoundTriggerHw;
 
 /**
- * SoundTrigger HAL interface. Used for hardware recognition of hotwords.
+ * SoundTrigger HAL interface. Used for hardware recognition of hotwords
+ * and other sounds.
  */
 interface ISoundTriggerHw extends @2.1::ISoundTriggerHw {
 
     /**
      * Get the state of a given model.
-     * The model state is returned as a RecognitionEvent.
-     * @param modelHandle The handle of the sound model to use for recognition
+     * The model state is returned asynchronously as a RecognitionEvent via
+     * the callback that was registered in startRecognition().
+     * @param modelHandle The handle of the sound model whose state is being
+     *                    queried.
      * @return retval Operation completion status: 0 in case of success,
      *                -ENOSYS in case of invalid model handle,
      *                -ENOMEM in case of memory allocation failure,
-     *                -ENODEV in case of initialization error.
-     * @return state  RecognitionEvent in case of success
+     *                -ENODEV in case of initialization error,
+     *                -EINVAL in case where a recognition event is already
+     *                        being processed.
      */
-    getModelState(SoundModelHandle modelHandle)
-            generates (int32_t retval, @2.0::ISoundTriggerHwCallback.RecognitionEvent state);
+    getModelState(SoundModelHandle modelHandle) generates (int32_t retval);
 };
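
With this signature, getModelState() only returns a status code synchronously; the RecognitionEvent carrying the state is delivered later on the ISoundTriggerHwCallback that was registered when recognition was started. A client-side sketch, assuming the generated @2.2 headers (the requestModelState() helper is illustrative):

    // Client-side sketch, assuming the generated android.hardware.soundtrigger@2.2 headers.
    #include <android/hardware/soundtrigger/2.2/ISoundTriggerHw.h>

    #include <cerrno>
    #include <cstdint>

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::soundtrigger::V2_0::SoundModelHandle;
    using ::android::hardware::soundtrigger::V2_2::ISoundTriggerHw;

    // Asks for the model state; the RecognitionEvent itself arrives later on the
    // callback that was passed to startRecognition_2_1().
    bool requestModelState(const sp<ISoundTriggerHw>& hal, SoundModelHandle handle) {
        Return<int32_t> ret = hal->getModelState(handle);
        if (!ret.isOk()) return false;        // transport error
        int32_t status = ret;
        if (status == -ENOSYS) return false;  // unknown model handle
        return status == 0;                   // 0: accepted, event will follow
    }
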
diff --git a/soundtrigger/2.2/default/SoundTriggerHw.cpp b/soundtrigger/2.2/default/SoundTriggerHw.cpp
index ffdf9fb..9d930ac 100644
--- a/soundtrigger/2.2/default/SoundTriggerHw.cpp
+++ b/soundtrigger/2.2/default/SoundTriggerHw.cpp
@@ -690,45 +690,26 @@
 
 // Begin V2_2 implementation
 
-Return<void> SoundTriggerHw::getModelState(int32_t modelHandle, getModelState_cb hidl_cb) {
-    int ret = 0;
-    V2_0::ISoundTriggerHwCallback::RecognitionEvent event;
-    struct sound_trigger_recognition_event* halEvent = NULL;
+Return<int32_t> SoundTriggerHw::getModelState(int32_t modelHandle) {
     sp<SoundModelClient> client;
     if (mHwDevice == NULL) {
-        ret = -ENODEV;
-        goto exit;
+        return -ENODEV;
     }
 
     {
         AutoMutex lock(mLock);
         client = mClients.valueFor(modelHandle);
         if (client == 0) {
-            ret = -ENOSYS;
-            goto exit;
+            return -ENOSYS;
         }
     }
 
     if (mHwDevice->get_model_state == NULL) {
         ALOGE("Failed to get model state from device, no such method");
-        ret = -ENODEV;
-        goto exit;
+        return -ENODEV;
     }
 
-    // Get the state from the device (as a recognition event)
-    halEvent = mHwDevice->get_model_state(mHwDevice, client->getHalHandle());
-    if (halEvent == NULL) {
-        ALOGE("Failed to get model state from device");
-        ret = -ENODEV;
-        goto exit;
-    }
-
-    convertRecognitionEventFromHal(&event, halEvent);
-
-exit:
-    hidl_cb(ret, event);
-    free(halEvent);
-    return Void();
+    return mHwDevice->get_model_state(mHwDevice, client->getHalHandle());
 }
 
 // Methods from ::android::hidl::base::V1_0::IBase follow.
diff --git a/soundtrigger/2.2/default/SoundTriggerHw.h b/soundtrigger/2.2/default/SoundTriggerHw.h
index 876b990..6676318 100644
--- a/soundtrigger/2.2/default/SoundTriggerHw.h
+++ b/soundtrigger/2.2/default/SoundTriggerHw.h
@@ -82,7 +82,7 @@
                                          int32_t cookie) override;
 
     // Methods from V2_2::ISoundTriggerHw follow.
-    Return<void> getModelState(int32_t modelHandle, getModelState_cb _hidl_cb) override;
+    Return<int32_t> getModelState(int32_t modelHandle) override;
 
     SoundTriggerHw();
 
diff --git a/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp b/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp
index a473c37..0f37816 100644
--- a/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp
+++ b/soundtrigger/2.2/vts/functional/VtsHalSoundtriggerV2_2TargetTest.cpp
@@ -74,21 +74,14 @@
  * Test ISoundTriggerHw::getModelState() method
  *
  * Verifies that:
- *  - the implementation returns -EINVAL with invalid model handle
+ *  - the implementation returns -ENOSYS with invalid model handle
  *
  */
 TEST_F(SoundTriggerHidlTest, GetModelStateInvalidModel) {
-    int ret = android::OK;
-    ::android::hardware::soundtrigger::V2_0::ISoundTriggerHwCallback::RecognitionEvent event;
     SoundModelHandle handle = 0;
-    Return<void> hidlReturn =
-        mSoundTriggerHal->getModelState(handle, [&](int32_t retval, auto res) {
-            ret = retval;
-            event = res;
-        });
-
+    Return<int32_t> hidlReturn = mSoundTriggerHal->getModelState(handle);
     EXPECT_TRUE(hidlReturn.isOk());
-    EXPECT_EQ(-ENOSYS, ret);
+    EXPECT_EQ(-ENOSYS, hidlReturn);
 }
 
 int main(int argc, char** argv) {
diff --git a/tests/safeunion/cpp/1.0/.hidl_for_test b/tests/safeunion/cpp/1.0/.hidl_for_test
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/safeunion/cpp/1.0/.hidl_for_test
diff --git a/tests/safeunion/cpp/1.0/Android.bp b/tests/safeunion/cpp/1.0/Android.bp
new file mode 100644
index 0000000..1111719
--- /dev/null
+++ b/tests/safeunion/cpp/1.0/Android.bp
@@ -0,0 +1,14 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.tests.safeunion.cpp@1.0",
+    root: "android.hardware",
+    srcs: [
+        "ICppSafeUnion.hal",
+    ],
+    interfaces: [
+        "android.hidl.base@1.0",
+    ],
+    gen_java: false,
+}
+
diff --git a/tests/safeunion/cpp/1.0/ICppSafeUnion.hal b/tests/safeunion/cpp/1.0/ICppSafeUnion.hal
new file mode 100644
index 0000000..cc1a91e
--- /dev/null
+++ b/tests/safeunion/cpp/1.0/ICppSafeUnion.hal
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.tests.safeunion.cpp@1.0;
+
+/**
+ * Safe union for cpp-only types.
+ */
+interface ICppSafeUnion {
+    safe_union PointerFmqSafeUnion {
+        interface iface;
+        fmq_sync<uint8_t> fmqSync;
+        fmq_unsync<uint8_t> fmqUnsync;
+    };
+    safe_union FmqSafeUnion {
+        fmq_sync<uint8_t> fmqSync;
+        fmq_unsync<uint8_t> fmqUnsync;
+    };
+
+    repeatPointerFmqSafeUnion(PointerFmqSafeUnion fmq) generates (PointerFmqSafeUnion fmq);
+    repeatFmqSafeUnion(FmqSafeUnion fmq) generates (FmqSafeUnion fmq);
+};
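
ICppSafeUnion exists to exercise safe_union members that are only expressible in C++ (interfaces and FMQ descriptors). A hedged sketch of round-tripping an FmqSafeUnion through repeatFmqSafeUnion(), assuming the generated test headers; the queue size and the roundTripFmqSafeUnion() helper are illustrative:

    // Sketch, assuming the generated android.hardware.tests.safeunion.cpp@1.0 headers.
    #include <android/hardware/tests/safeunion/cpp/1.0/ICppSafeUnion.h>
    #include <fmq/MessageQueue.h>

    using ::android::sp;
    using ::android::hardware::kSynchronizedReadWrite;
    using ::android::hardware::MessageQueue;
    using ::android::hardware::tests::safeunion::cpp::V1_0::ICppSafeUnion;

    // Round-trips an FmqSafeUnion holding a synchronized queue descriptor.
    bool roundTripFmqSafeUnion(const sp<ICppSafeUnion>& service) {
        // Back the union with a real queue so its descriptor is valid.
        MessageQueue<uint8_t, kSynchronizedReadWrite> queue(64 /* numElements */);
        if (!queue.isValid()) return false;

        ICppSafeUnion::FmqSafeUnion in;
        in.fmqSync(*queue.getDesc());  // select the fmq_sync<uint8_t> member

        bool matched = false;
        auto ret = service->repeatFmqSafeUnion(in, [&](const auto& out) {
            matched = out.getDiscriminator() ==
                      ICppSafeUnion::FmqSafeUnion::hidl_discriminator::fmqSync;
        });
        return ret.isOk() && matched;
    }
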
diff --git a/tests/safeunion/cpp/1.0/default/Android.bp b/tests/safeunion/cpp/1.0/default/Android.bp
new file mode 100644
index 0000000..210a639
--- /dev/null
+++ b/tests/safeunion/cpp/1.0/default/Android.bp
@@ -0,0 +1,22 @@
+cc_library {
+    name: "android.hardware.tests.safeunion.cpp@1.0-impl",
+    relative_install_path: "hw",
+    srcs: [
+        "CppSafeUnion.cpp",
+    ],
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "libhidlbase",
+        "libhidltransport",
+        "libhwbinder",
+        "liblog",
+        "libutils",
+    ],
+
+    // These are static libs only for testing purposes and portability. Shared
+    // libs should be used on device.
+    static_libs: [
+        "android.hardware.tests.safeunion.cpp@1.0",
+    ],
+}
diff --git a/tests/safeunion/cpp/1.0/default/CppSafeUnion.cpp b/tests/safeunion/cpp/1.0/default/CppSafeUnion.cpp
new file mode 100644
index 0000000..dcbd9d7
--- /dev/null
+++ b/tests/safeunion/cpp/1.0/default/CppSafeUnion.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CppSafeUnion.h"
+
+namespace android {
+namespace hardware {
+namespace tests {
+namespace safeunion {
+namespace cpp {
+namespace V1_0 {
+namespace implementation {
+
+Return<void> CppSafeUnion::repeatPointerFmqSafeUnion(const ICppSafeUnion::PointerFmqSafeUnion& fmq,
+                                                     repeatPointerFmqSafeUnion_cb _hidl_cb) {
+    _hidl_cb(fmq);
+    return Void();
+}
+
+Return<void> CppSafeUnion::repeatFmqSafeUnion(const ICppSafeUnion::FmqSafeUnion& fmq,
+                                              repeatFmqSafeUnion_cb _hidl_cb) {
+    _hidl_cb(fmq);
+    return Void();
+}
+
+ICppSafeUnion* HIDL_FETCH_ICppSafeUnion(const char* /* name */) {
+    return new CppSafeUnion();
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace cpp
+}  // namespace safeunion
+}  // namespace tests
+}  // namespace hardware
+}  // namespace android
diff --git a/tests/safeunion/cpp/1.0/default/CppSafeUnion.h b/tests/safeunion/cpp/1.0/default/CppSafeUnion.h
new file mode 100644
index 0000000..a128273
--- /dev/null
+++ b/tests/safeunion/cpp/1.0/default/CppSafeUnion.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/tests/safeunion/cpp/1.0/ICppSafeUnion.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace tests {
+namespace safeunion {
+namespace cpp {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct CppSafeUnion : public ICppSafeUnion {
+    Return<void> repeatPointerFmqSafeUnion(const ICppSafeUnion::PointerFmqSafeUnion& fmq,
+                                           repeatPointerFmqSafeUnion_cb _hidl_cb) override;
+    Return<void> repeatFmqSafeUnion(const ICppSafeUnion::FmqSafeUnion& fmq,
+                                    repeatFmqSafeUnion_cb _hidl_cb) override;
+};
+
+extern "C" ICppSafeUnion* HIDL_FETCH_ICppSafeUnion(const char* name);
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace cpp
+}  // namespace safeunion
+}  // namespace tests
+}  // namespace hardware
+}  // namespace android