Merge "Camera: Clarify configureStreams doc"
diff --git a/automotive/vehicle/2.0/default/common/src/VehicleHalManager.cpp b/automotive/vehicle/2.0/default/common/src/VehicleHalManager.cpp
index b5de262..393d3ec 100644
--- a/automotive/vehicle/2.0/default/common/src/VehicleHalManager.cpp
+++ b/automotive/vehicle/2.0/default/common/src/VehicleHalManager.cpp
@@ -218,7 +218,7 @@
     const auto& clients =
         mSubscriptionManager.getSubscribedClients(property, SubscribeFlags::EVENTS_FROM_CAR);
 
-    for (auto client : clients) {
+    for (const auto& client : clients) {
         client->getCallback()->onPropertySetError(errorCode, property, areaId);
     }
 }
@@ -312,7 +312,7 @@
 void VehicleHalManager::handlePropertySetEvent(const VehiclePropValue& value) {
     auto clients =
         mSubscriptionManager.getSubscribedClients(value.prop, SubscribeFlags::EVENTS_FROM_ANDROID);
-    for (auto client : clients) {
+    for (const auto& client : clients) {
         client->getCallback()->onPropertySet(value);
     }
 }
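The switch to const auto& above avoids copying each element on every iteration; the elements here are android::sp<> smart pointers, so a by-value loop variable costs an extra incStrong/decStrong pair per client. A minimal standalone sketch of the difference (the Client type is hypothetical, for illustration only):

    #include <list>
    #include <utils/RefBase.h>          // android::RefBase
    #include <utils/StrongPointer.h>    // android::sp

    struct Client : public android::RefBase {};   // stand-in for HalClient

    void visitAll(const std::list<android::sp<Client>>& clients) {
        // By value: copies the sp<>, bumping and dropping the refcount each pass.
        for (auto c : clients) { (void)c; }
        // By const reference: no copy, no refcount traffic.
        for (const auto& c : clients) { (void)c; }
    }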
diff --git a/automotive/vehicle/2.0/default/tests/SubscriptionManager_test.cpp b/automotive/vehicle/2.0/default/tests/SubscriptionManager_test.cpp
index ab2013d..23ab6bc 100644
--- a/automotive/vehicle/2.0/default/tests/SubscriptionManager_test.cpp
+++ b/automotive/vehicle/2.0/default/tests/SubscriptionManager_test.cpp
@@ -66,7 +66,7 @@
     static std::list<sp<IVehicleCallback>> extractCallbacks(
             const std::list<sp<HalClient>>& clients) {
         std::list<sp<IVehicleCallback>> callbacks;
-        for (auto c : clients) {
+        for (const auto& c : clients) {
             callbacks.push_back(c->getCallback());
         }
         return callbacks;
diff --git a/camera/device/3.2/default/CameraDevice.cpp b/camera/device/3.2/default/CameraDevice.cpp
index dfbb976..297e778 100644
--- a/camera/device/3.2/default/CameraDevice.cpp
+++ b/camera/device/3.2/default/CameraDevice.cpp
@@ -262,7 +262,7 @@
             session->getInterface()->interfaceChain([](
                 ::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
                     ALOGV("Session interface chain:");
-                    for (auto iface : interfaceChain) {
+                    for (const auto& iface : interfaceChain) {
                         ALOGV("  %s", iface.c_str());
                     }
                 });
diff --git a/camera/device/3.3/default/CameraDevice.cpp b/camera/device/3.3/default/CameraDevice.cpp
index ce5e1de..b4d279e 100644
--- a/camera/device/3.3/default/CameraDevice.cpp
+++ b/camera/device/3.3/default/CameraDevice.cpp
@@ -49,7 +49,7 @@
         session->getInterface()->interfaceChain([](
             ::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
                 ALOGV("Session interface chain:");
-                for (auto iface : interfaceChain) {
+                for (const auto& iface : interfaceChain) {
                     ALOGV("  %s", iface.c_str());
                 }
             });
diff --git a/camera/device/3.4/default/CameraDevice.cpp b/camera/device/3.4/default/CameraDevice.cpp
index d73833a..bc443de 100644
--- a/camera/device/3.4/default/CameraDevice.cpp
+++ b/camera/device/3.4/default/CameraDevice.cpp
@@ -49,7 +49,7 @@
         session->getInterface()->interfaceChain([](
             ::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
                 ALOGV("Session interface chain:");
-                for (auto iface : interfaceChain) {
+                for (const auto& iface : interfaceChain) {
                     ALOGV("  %s", iface.c_str());
                 }
             });
diff --git a/cas/1.0/default/android.hardware.cas@1.0-service-lazy.rc b/cas/1.0/default/android.hardware.cas@1.0-service-lazy.rc
index 735cfbc..443549a 100644
--- a/cas/1.0/default/android.hardware.cas@1.0-service-lazy.rc
+++ b/cas/1.0/default/android.hardware.cas@1.0-service-lazy.rc
@@ -1,4 +1,4 @@
-service vendor.cas-hal-1-0 /vendor/bin/hw/android.hardware.cas@1.0-service
+service vendor.cas-hal-1-0 /vendor/bin/hw/android.hardware.cas@1.0-service-lazy
     interface android.hardware.cas@1.0::IMediaCasService default
     oneshot
     disabled
diff --git a/contexthub/1.0/vts/functional/VtsHalContexthubV1_0TargetTest.cpp b/contexthub/1.0/vts/functional/VtsHalContexthubV1_0TargetTest.cpp
index 7492152..629477a 100644
--- a/contexthub/1.0/vts/functional/VtsHalContexthubV1_0TargetTest.cpp
+++ b/contexthub/1.0/vts/functional/VtsHalContexthubV1_0TargetTest.cpp
@@ -83,7 +83,7 @@
     sp<IContexthub> hubApi = ::testing::VtsHalHidlTargetTestBase::getService<IContexthub>();
 
     if (hubApi != nullptr) {
-      for (ContextHub hub : getHubsSync(hubApi)) {
+      for (const ContextHub& hub : getHubsSync(hubApi)) {
         hubIds.push_back(hub.hubId);
       }
     }
@@ -206,7 +206,7 @@
   hidl_vec<ContextHub> hubs = getHubsSync(hubApi);
   ALOGD("System reports %zu hubs", hubs.size());
 
-  for (ContextHub hub : hubs) {
+  for (const ContextHub& hub : hubs) {
     ALOGD("Checking hub ID %" PRIu32, hub.hubId);
 
     EXPECT_FALSE(hub.name.empty());
diff --git a/drm/1.0/vts/functional/drm_hal_vendor_test.cpp b/drm/1.0/vts/functional/drm_hal_vendor_test.cpp
index d03b2af..20a2ca4 100644
--- a/drm/1.0/vts/functional/drm_hal_vendor_test.cpp
+++ b/drm/1.0/vts/functional/drm_hal_vendor_test.cpp
@@ -177,7 +177,7 @@
 TEST_P(DrmHalVendorFactoryTest, ValidateConfigurations) {
     const char* kVendorStr = "Vendor module ";
     size_t count = 0;
-    for (auto config : contentConfigurations) {
+    for (const auto& config : contentConfigurations) {
         ASSERT_TRUE(config.name.size() > 0) << kVendorStr << "has no name";
         ASSERT_TRUE(config.serverUrl.size() > 0) << kVendorStr
                                                  << "has no serverUrl";
@@ -186,7 +186,7 @@
         ASSERT_TRUE(config.mimeType.size() > 0) << kVendorStr
                                                 << "has no mime type";
         ASSERT_TRUE(config.keys.size() >= 1) << kVendorStr << "has no keys";
-        for (auto key : config.keys) {
+        for (const auto& key : config.keys) {
             ASSERT_TRUE(key.keyId.size() > 0) << kVendorStr
                                               << " has zero length keyId";
             ASSERT_TRUE(key.keyId.size() > 0) << kVendorStr
@@ -245,7 +245,7 @@
  */
 TEST_P(DrmHalVendorFactoryTest, ValidContentTypeSupported) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
+    for (const auto& config : contentConfigurations) {
         EXPECT_TRUE(drmFactory->isContentTypeSupported(config.mimeType));
     }
 }
@@ -610,7 +610,7 @@
  */
 TEST_P(DrmHalVendorPluginTest, RestoreKeys) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
+    for (const auto& config : contentConfigurations) {
         if (config.policy.allowOffline) {
             auto sessionId = openSession();
             hidl_vec<uint8_t> keySetId =
@@ -645,7 +645,7 @@
  */
 TEST_P(DrmHalVendorPluginTest, RestoreKeysClosedSession) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
+    for (const auto& config : contentConfigurations) {
         if (config.policy.allowOffline) {
             auto sessionId = openSession();
             hidl_vec<uint8_t> keySetId =
@@ -1022,8 +1022,8 @@
  */
 TEST_P(DrmHalVendorPluginTest, RequiresSecureDecoderConfig) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
-        for (auto key : config.keys) {
+    for (const auto& config : contentConfigurations) {
+        for (const auto& key : config.keys) {
             if (key.isSecure) {
                 EXPECT_TRUE(cryptoPlugin->requiresSecureDecoderComponent(config.mimeType));
                 break;
@@ -1471,7 +1471,7 @@
  */
 TEST_P(DrmHalVendorDecryptTest, QueryKeyStatus) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
+    for (const auto& config : contentConfigurations) {
         auto sessionId = openSession();
         loadKeys(sessionId, config);
         auto keyStatus = queryKeyStatus(sessionId);
@@ -1485,8 +1485,8 @@
  */
 TEST_P(DrmHalVendorDecryptTest, ClearSegmentTest) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
-        for (auto key : config.keys) {
+    for (const auto& config : contentConfigurations) {
+        for (const auto& key : config.keys) {
             const size_t kSegmentSize = 1024;
             vector<uint8_t> iv(AES_BLOCK_SIZE, 0);
             const Pattern noPattern = {0, 0};
@@ -1513,8 +1513,8 @@
  */
 TEST_P(DrmHalVendorDecryptTest, EncryptedAesCtrSegmentTest) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
-        for (auto key : config.keys) {
+    for (const auto& config : contentConfigurations) {
+        for (const auto& key : config.keys) {
             const size_t kSegmentSize = 1024;
             vector<uint8_t> iv(AES_BLOCK_SIZE, 0);
             const Pattern noPattern = {0, 0};
@@ -1540,8 +1540,8 @@
  */
 TEST_P(DrmHalVendorDecryptTest, EncryptedAesCtrSegmentTestNoKeys) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
-        for (auto key : config.keys) {
+    for (const auto& config : contentConfigurations) {
+        for (const auto& key : config.keys) {
             vector<uint8_t> iv(AES_BLOCK_SIZE, 0);
             const Pattern noPattern = {0, 0};
             const vector<SubSample> subSamples = {{.numBytesOfClearData = 256,
@@ -1567,8 +1567,8 @@
  */
 TEST_P(DrmHalVendorDecryptTest, AttemptDecryptWithKeysRemoved) {
     RETURN_IF_SKIPPED;
-    for (auto config : contentConfigurations) {
-        for (auto key : config.keys) {
+    for (const auto& config : contentConfigurations) {
+        for (const auto& key : config.keys) {
             vector<uint8_t> iv(AES_BLOCK_SIZE, 0);
             const Pattern noPattern = {0, 0};
             const vector<SubSample> subSamples = {{.numBytesOfClearData = 256,
diff --git a/drm/1.1/vts/functional/drm_hal_clearkey_test.cpp b/drm/1.1/vts/functional/drm_hal_clearkey_test.cpp
index 1246616..7dedd7f 100644
--- a/drm/1.1/vts/functional/drm_hal_clearkey_test.cpp
+++ b/drm/1.1/vts/functional/drm_hal_clearkey_test.cpp
@@ -228,13 +228,13 @@
                                       const std::string& componentName, const VT& componentValue) {
      bool validAttribute = false;
      bool validComponent = false;
-     for (DrmMetricGroup::Attribute attribute : metric.attributes) {
+     for (const DrmMetricGroup::Attribute& attribute : metric.attributes) {
          if (attribute.name == attributeName &&
              ValueEquals(attribute.type, attributeValue, attribute)) {
              validAttribute = true;
          }
      }
-     for (DrmMetricGroup::Value value : metric.values) {
+     for (const DrmMetricGroup::Value& value : metric.values) {
          if (value.componentName == componentName &&
              ValueEquals(value.type, componentValue, value)) {
              validComponent = true;
diff --git a/graphics/composer/2.1/utils/vts/ComposerVts.cpp b/graphics/composer/2.1/utils/vts/ComposerVts.cpp
index 2f531b4..6e668af 100644
--- a/graphics/composer/2.1/utils/vts/ComposerVts.cpp
+++ b/graphics/composer/2.1/utils/vts/ComposerVts.cpp
@@ -78,9 +78,9 @@
 ComposerClient::ComposerClient(const sp<IComposerClient>& client) : mClient(client) {}
 
 ComposerClient::~ComposerClient() {
-    for (auto it : mDisplayResources) {
+    for (const auto& it : mDisplayResources) {
         Display display = it.first;
-        DisplayResource& resource = it.second;
+        const DisplayResource& resource = it.second;
 
         for (auto layer : resource.layers) {
             EXPECT_EQ(Error::NONE, mClient->destroyLayer(display, layer))
diff --git a/health/2.0/default/healthd_common.cpp b/health/2.0/default/healthd_common.cpp
index 8ff409d..b5fdc8e 100644
--- a/health/2.0/default/healthd_common.cpp
+++ b/health/2.0/default/healthd_common.cpp
@@ -67,8 +67,6 @@
 
 #define POWER_SUPPLY_SUBSYSTEM "power_supply"
 
-// epoll_create() parameter is actually unused
-#define MAX_EPOLL_EVENTS 40
 static int uevent_fd;
 static int wakealarm_fd;
 
@@ -240,9 +238,9 @@
 }
 
 static int healthd_init() {
-    epollfd = epoll_create(MAX_EPOLL_EVENTS);
+    epollfd = epoll_create1(EPOLL_CLOEXEC);
     if (epollfd == -1) {
-        KLOG_ERROR(LOG_TAG, "epoll_create failed; errno=%d\n", errno);
+        KLOG_ERROR(LOG_TAG, "epoll_create1 failed; errno=%d\n", errno);
         return -1;
     }
 
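For reference, epoll_create()'s size argument has been ignored by the kernel since Linux 2.6.8, which is why the parameter (and its MAX_EPOLL_EVENTS placeholder) can be dropped; epoll_create1() instead takes flags such as EPOLL_CLOEXEC. A standalone sketch of the replacement pattern (not part of healthd):

    #include <sys/epoll.h>
    #include <cerrno>
    #include <cstdio>

    int make_epoll_fd() {
        // EPOLL_CLOEXEC keeps the descriptor from leaking across exec().
        int fd = epoll_create1(EPOLL_CLOEXEC);
        if (fd == -1) {
            std::fprintf(stderr, "epoll_create1 failed; errno=%d\n", errno);
        }
        return fd;
    }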
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.cpp b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
index 46bf243..a1c5a1a 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
@@ -1,10 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "Callbacks.h"
 #include <android-base/logging.h>
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
 namespace implementation {
 
 CallbackBase::CallbackBase() : mNotified(false) {}
@@ -88,7 +104,15 @@
 PreparedModelCallback::~PreparedModelCallback() {}
 
 Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
-                                           const sp<IPreparedModel>& preparedModel) {
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    mErrorStatus = errorStatus;
+    mPreparedModel = preparedModel;
+    CallbackBase::notify();
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
     mErrorStatus = errorStatus;
     mPreparedModel = preparedModel;
     CallbackBase::notify();
@@ -100,7 +124,7 @@
     return mErrorStatus;
 }
 
-sp<IPreparedModel> PreparedModelCallback::getPreparedModel() {
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
     wait();
     return mPreparedModel;
 }
@@ -115,13 +139,19 @@
     return Void();
 }
 
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) {
+    mErrorStatus = errorStatus;
+    CallbackBase::notify();
+    return Void();
+}
+
 ErrorStatus ExecutionCallback::getStatus() {
     wait();
     return mErrorStatus;
 }
 
 }  // namespace implementation
-}  // namespace V1_0
+}  // namespace V1_2
 }  // namespace neuralnetworks
 }  // namespace hardware
 }  // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 570a4fb..e89980d 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -1,22 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
 
 #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
 #include <chrono>
 #include <condition_variable>
 #include <functional>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
 #include <mutex>
 #include <thread>
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
 namespace implementation {
 
+using V1_0::ErrorStatus;
+
 /**
  * The CallbackBase class is used internally by the NeuralNetworks runtime to
  * synchronize between different threads. An asynchronous task is launched
@@ -156,11 +176,11 @@
  * asynchronously with respect to the runtime. If a calling thread calls wait*
  * or get* on a PreparedModelCallback object and the corresponding asynchronous
  * task has not finished preparing the model, the calling thread will block
- * until the asynchronous task has called notify. For more information on the
- * synchronization behavior, refer to the CallbackBase class.
+ * until the asynchronous task has either called notify or notify_1_2. For more
+ * information on the synchronization behavior, refer to the CallbackBase class.
  *
  * This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
  * IPreparedModelCallback. This callback object is passed as an argument to
  * IDevice::prepareModel.
  */
@@ -170,15 +190,15 @@
     ~PreparedModelCallback() override;
 
     /**
-     * IPreparedModelCallback::notify marks the callback object with the return
-     * status of the asynchronous model preparation along with the prepared
-     * model, and calls CallbackBase::notify, enabling all prior and future
-     * wait* calls on the PreparedModelCallback object to proceed. For more
-     * information on the synchronization behavior, refer to the CallbackBase
-     * class.
+     * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
+     * mark the callback object with the return status of the asynchronous
+     * model preparation along with the prepared model, and call
+     * CallbackBase::notify, enabling all prior and future wait* calls on the
+     * PreparedModelCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
      *
-     * IPreparedModelCallback::notify must be called exactly once on a given
-     * PreparedModelCallback object.
+     * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
+     * must be called exactly once on a given PreparedModelCallback object.
      *
      * @param status Error status returned from asynchronously preparing the
      *               model; will be:
@@ -189,7 +209,9 @@
      * @param preparedModel Returned model that has been prepared for execution,
      *                      nullptr if the model was unable to be prepared.
      */
-    Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+    Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;
 
     /**
      * Retrieves the error status returned from the asynchronous task launched
@@ -217,11 +239,11 @@
      *                       execution, nullptr if the model was unable to be
      *                       prepared.
      */
-    sp<IPreparedModel> getPreparedModel();
+    sp<V1_0::IPreparedModel> getPreparedModel();
 
- private:
+   private:
     ErrorStatus        mErrorStatus;
-    sp<IPreparedModel> mPreparedModel;
+    sp<V1_0::IPreparedModel> mPreparedModel;
 };
 
 /**
@@ -229,12 +251,12 @@
  * execution from a task executing asynchronously with respect to the runtime.
  * If a calling thread calls wait* or get* on a PreparedModelCallback object and
  * the corresponding asynchronous task has not finished the execution, the
- * calling thread will block until the asynchronous task has called notify. For
- * more information on the synchronization behavior, refer to the CallbackBase
- * class.
+ * calling thread will block until the asynchronous task has either called notify
+ * or notify_1_2. For more information on the synchronization behavior, refer to
+ * the CallbackBase class.
  *
  * This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
  * IExecutionCallback. This callback object is passed as an argument to
  * IPreparedModel::execute.
  */
@@ -244,14 +266,14 @@
     ~ExecutionCallback() override;
 
     /**
-     * IExecutionCallback::notify marks the callback object with the return
-     * status of the asynchronous execution that held this callback and enables
-     * all prior and future wait* calls on the ExecutionCallback object to
-     * proceed. For more information on the synchronization behavior, refer to
-     * the CallbackBase class.
+     * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
+     * callback object with the return status of the asynchronous execution that
+     * held this callback and enable all prior and future wait* calls on the
+     * ExecutionCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
      *
-     * IExecutionCallback::notify must be called exactly once on a given
-     * ExecutionCallback object.
+     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+     * be called exactly once on a given ExecutionCallback object.
      *
      * @param status Error status returned from asynchronously preparing the
      *               model; will be:
@@ -263,6 +285,7 @@
      *               - INVALID_ARGUMENT if the input request is invalid
      */
     Return<void> notify(ErrorStatus status) override;
+    Return<void> notify_1_2(ErrorStatus status) override;
 
     /**
      * Retrieves the error status returned from the asynchronous task launched
@@ -299,7 +322,7 @@
 }
 
 }  // namespace implementation
-}  // namespace V1_0
+}  // namespace V1_2
 }  // namespace neuralnetworks
 }  // namespace hardware
 }  // namespace android
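Sketch of how a caller is expected to drive this callback object once it has been handed to a prepareModel-style call (the helper name is hypothetical; the castFrom/withDefault usage mirrors the VTS harness change below):

    // Block until the driver calls notify or notify_1_2, then surface the
    // prepared model as the 1.2 interface (nullptr for 1.0-only drivers).
    static sp<V1_2::IPreparedModel> awaitPreparedModel(
            const sp<PreparedModelCallback>& callback) {
        callback->wait();
        if (callback->getStatus() != ErrorStatus::NONE) {
            return nullptr;
        }
        sp<V1_0::IPreparedModel> base = callback->getPreparedModel();
        return V1_2::IPreparedModel::castFrom(base).withDefault(nullptr);
    }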
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 802d018..ab524c2 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -24,6 +24,11 @@
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
 #include <android/hidl/allocator/1.0/IAllocator.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
@@ -34,8 +39,9 @@
 namespace neuralnetworks {
 
 namespace generated_tests {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::test_helper::bool8;
 using ::test_helper::compare;
 using ::test_helper::expectMultinomialDistributionWithinTolerance;
 using ::test_helper::filter;
@@ -65,13 +71,25 @@
     copy_back_<uint8_t>(dst, ra, src);
     copy_back_<int16_t>(dst, ra, src);
     copy_back_<_Float16>(dst, ra, src);
-    static_assert(5 == std::tuple_size<MixedTyped>::value,
+    copy_back_<bool8>(dst, ra, src);
+    static_assert(6 == std::tuple_size<MixedTyped>::value,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
+                                                const Request& request,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute(request, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+                                                const Request& request,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute_1_2(request, callback);
+}
+template <typename T_IPreparedModel>
+void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
                            bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                            float fpRtol = 1e-5f) {
@@ -170,8 +188,9 @@
         // launch execution
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         ASSERT_NE(nullptr, executionCallback.get());
-        Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(
-            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionCallback);
+        Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+            preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+            executionCallback);
         ASSERT_TRUE(executionLaunchStatus.isOk());
         EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
@@ -197,6 +216,16 @@
     }
 }
 
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+                             sp<V1_0::IPreparedModel>* preparedModel) {
+    *preparedModel = callback->getPreparedModel();
+}
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+                             sp<V1_2::IPreparedModel>* preparedModel) {
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+    *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
 void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
              std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
     V1_0::Model model = create_model();
@@ -222,7 +251,8 @@
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_0::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);
 
     // early termination if vendor service cannot fully prepare model
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -268,7 +298,8 @@
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_0::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);
 
     // early termination if vendor service cannot fully prepare model
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -314,7 +345,8 @@
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_2::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);
 
     // early termination if vendor service cannot fully prepare model
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index 26b4d8b..55e5861 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -40,8 +40,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index b813c39..5d24fb5 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -27,8 +27,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 1d3dee3..72a5007 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -33,8 +33,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 290a9d3..d98ea04 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -40,8 +40,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
index a36b24c..1df3218 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
@@ -40,8 +40,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index d6c6533..b35a901 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -33,8 +33,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index e7d96c7..5225bf7 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -33,8 +33,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index a64268f..970e8b5 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -36,6 +36,7 @@
 
 using V1_0::DeviceStatus;
 using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
 using V1_0::Operand;
 using V1_0::OperandType;
 using V1_0::Request;
diff --git a/neuralnetworks/1.2/Android.bp b/neuralnetworks/1.2/Android.bp
index e155bbd..528a2c7 100644
--- a/neuralnetworks/1.2/Android.bp
+++ b/neuralnetworks/1.2/Android.bp
@@ -9,6 +9,9 @@
     srcs: [
         "types.hal",
         "IDevice.hal",
+        "IExecutionCallback.hal",
+        "IPreparedModel.hal",
+        "IPreparedModelCallback.hal",
     ],
     interfaces: [
         "android.hardware.neuralnetworks@1.0",
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index aff4cf3..6a77961 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -17,9 +17,9 @@
 package android.hardware.neuralnetworks@1.2;
 
 import @1.0::ErrorStatus;
-import @1.0::IPreparedModelCallback;
 import @1.1::ExecutionPreference;
 import @1.1::IDevice;
+import IPreparedModelCallback;
 
 /**
  * This interface represents a device driver.
diff --git a/neuralnetworks/1.2/IExecutionCallback.hal b/neuralnetworks/1.2/IExecutionCallback.hal
new file mode 100644
index 0000000..667e0d6
--- /dev/null
+++ b/neuralnetworks/1.2/IExecutionCallback.hal
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IExecutionCallback;
+
+/**
+ * IExecutionCallback must be used to return the error status result from an
+ * execution asynchronously launched from IPreparedModel::execute.
+ */
+interface IExecutionCallback extends @1.0::IExecutionCallback {
+
+    /**
+     * Either notify_1_2 or notify must be invoked immediately after the asynchronous
+     * task has finished performing the execution. Either notify_1_2 or notify must be
+     * provided with the ErrorStatus from the execution. If the asynchronous task is
+     * not launched, either notify_1_2 or notify must be invoked with the appropriate
+     * error.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *               (if the launch fails) or from the asynchronous task itself
+     *               (if the launch succeeds). Must be:
+     *               - NONE if the asynchronous execution was successful
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if the asynchronous task resulted in an
+     *                 unspecified error
+     *               - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                 not large enough to store the resultant values
+     *               - INVALID_ARGUMENT if the input request is invalid
+     */
+    oneway notify_1_2(ErrorStatus status);
+};
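Illustrative driver-side use of the new method: the asynchronous execution task reports its result through exactly one notify call, and a 1.2 driver does so via notify_1_2 (the helper name is hypothetical; logging is omitted):

    // Completion path of a driver's asynchronous execution task.
    void reportExecutionResult(const sp<V1_2::IExecutionCallback>& callback,
                               V1_0::ErrorStatus status) {
        // Exactly one of notify()/notify_1_2() per callback object.
        auto ret = callback->notify_1_2(status);
        if (!ret.isOk()) {
            // The callback lives in another process; the transport can fail.
        }
    }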
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
new file mode 100644
index 0000000..5590487
--- /dev/null
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModel;
+import @1.0::Request;
+import IExecutionCallback;
+
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
+interface IPreparedModel extends @1.0::IPreparedModel {
+    /**
+     * Launches an asynchronous execution on a prepared model.
+     *
+     * The execution is performed asynchronously with respect to the caller.
+     * execute_1_2 must verify the inputs to the function are correct. If there is
+     * an error, execute_1_2 must immediately invoke the callback with the
+     * appropriate ErrorStatus value, then return with the same ErrorStatus. If
+     * the inputs to the function are valid and there is no error, execute_1_2 must
+     * launch an asynchronous task to perform the execution in the background,
+     * and immediately return with ErrorStatus::NONE. If the asynchronous task
+     * fails to launch, execute_1_2 must immediately invoke the callback with
+     * ErrorStatus::GENERAL_FAILURE, then return with
+     * ErrorStatus::GENERAL_FAILURE.
+     *
+     * When the asynchronous task has finished its execution, it must
+     * immediately invoke the callback object provided as an input to the
+     * execute_1_2 function. This callback must be provided with the ErrorStatus of
+     * the execution.
+     *
+     * If the prepared model was prepared from a model wherein all
+     * tensor operands have fully specified dimensions, and the inputs
+     * to the function are valid, then the execution should launch
+     * and complete successfully (ErrorStatus::NONE). There must be
+     * no failure unless the device itself is in a bad state.
+     *
+     * Multiple threads can call the execute_1_2 function on the same IPreparedModel
+     * object concurrently with different requests.
+     *
+     * @param request The input and output information on which the prepared
+     *                model is to be executed.
+     * @param callback A callback object used to return the error status of
+     *                 the execution. The callback object's notify function must
+     *                 be called exactly once, even if the execution was
+     *                 unsuccessful.
+     * @return status Error status of the call, must be:
+     *                - NONE if task is successfully launched
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if there is an unspecified error
+     *                - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                  not large enough to store the resultant values
+     *                - INVALID_ARGUMENT if one of the input arguments is
+     *                  invalid
+     */
+    execute_1_2(Request request, IExecutionCallback callback)
+        generates (ErrorStatus status);
+};
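A rough driver-side shape of the contract described above (a sketch only: SamplePreparedModel, validateRequest and runModel are hypothetical, and the computation is stubbed):

    Return<ErrorStatus> SamplePreparedModel::execute_1_2(
            const Request& request, const sp<V1_2::IExecutionCallback>& callback) {
        if (callback == nullptr) {
            return ErrorStatus::INVALID_ARGUMENT;
        }
        if (!validateRequest(request)) {
            // Invalid input: report through the callback and return the same status.
            callback->notify_1_2(ErrorStatus::INVALID_ARGUMENT);
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // Valid input: run the work off the caller's thread and report once.
        std::thread([this, request, callback] {
            callback->notify_1_2(runModel(request));
        }).detach();
        return ErrorStatus::NONE;   // the asynchronous task was launched
    }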
diff --git a/neuralnetworks/1.2/IPreparedModelCallback.hal b/neuralnetworks/1.2/IPreparedModelCallback.hal
new file mode 100644
index 0000000..d3830c6
--- /dev/null
+++ b/neuralnetworks/1.2/IPreparedModelCallback.hal
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModelCallback;
+import IPreparedModel;
+
+/**
+ * IPreparedModelCallback must be used to return a prepared model produced by an
+ * asynchronous task launched from IDevice::prepareModel.
+ */
+interface IPreparedModelCallback extends @1.0::IPreparedModelCallback {
+
+    /**
+     * Either notify_1_2 or notify must be invoked immediately after the asynchronous
+     * task holding this callback has finished preparing the model. If the model was
+     * successfully prepared, either notify_1_2 or notify must be invoked with
+     * ErrorStatus::NONE and the prepared model. If the model was not able to be
+     * successfully prepared, either notify_1_2 or notify must be invoked with the
+     * appropriate ErrorStatus and nullptr as the IPreparedModel. If the asynchronous
+     * task holding this callback fails to launch or if the model provided to
+     * IDevice::prepareModel is invalid, either notify_1_2 or notify must be invoked
+     * with the appropriate error as well as nullptr for the IPreparedModel.
+     *
+     * @param status Error status returned from the asynchronous model
+     *               preparation task; must be:
+     *               - NONE if the asynchronous task successfully prepared the
+     *                 model
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if the asynchronous task resulted in an
+     *                 unspecified error
+     *               - INVALID_ARGUMENT if one of the input arguments to
+     *                 prepareModel is invalid
+     * @param preparedModel A model that has been asynchronously prepared for
+     *                      execution. If the model was unable to be prepared
+     *                      due to an error, nullptr must be passed in place of
+     *                      the IPreparedModel object.
+     */
+    oneway notify_1_2(ErrorStatus status, IPreparedModel preparedModel);
+};
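And the matching preparation-side call: whichever outcome occurs, the callback is invoked exactly once, with the prepared model on success or nullptr on failure (the function name is hypothetical):

    // Tail of a driver's asynchronous prepareModel task.
    void reportPreparation(const sp<V1_2::IPreparedModelCallback>& callback,
                           V1_0::ErrorStatus status,
                           const sp<V1_2::IPreparedModel>& model) {
        if (status == V1_0::ErrorStatus::NONE) {
            callback->notify_1_2(status, model);
        } else {
            callback->notify_1_2(status, nullptr);   // no model on error
        }
    }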
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index fe9b312..a1a1bad 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -42,6 +42,13 @@
     TENSOR_QUANT16_SYMM = 7,
     /** A tensor of 16 bit floating point values. */
     TENSOR_FLOAT16 = 8,
+    /**
+     * A tensor of 8 bit boolean values.
+     *
+     * Values of this operand type are either true or false. A zero value
+     * represents false; any other value represents true.
+     */
+    TENSOR_BOOL8 = 9,
 };
 
 /**
@@ -51,7 +58,7 @@
  */
 enum OperandTypeRange : uint32_t {
     OPERAND_FUNDAMENTAL_MIN = 0,
-    OPERAND_FUNDAMENTAL_MAX = 8,
+    OPERAND_FUNDAMENTAL_MAX = 9,
     OPERAND_OEM_MIN     = 10000,
     OPERAND_OEM_MAX     = 10001,
 };
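The TENSOR_BOOL8 convention above (zero is false, any other value is true) maps onto a plain 8-bit storage type; an illustrative host-side view (the bool8 alias here is an assumption, loosely following the test_helper alias used by the test harness):

    using bool8 = uint8_t;   // assumed storage type for TENSOR_BOOL8 elements

    inline bool toBool(bool8 v) { return v != 0; }       // any non-zero byte is true
    inline bool8 fromBool(bool b) { return b ? 1 : 0; }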
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index 79d5a60..9bff09c 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -40,8 +40,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
index 42e22b0..56a61d4 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -40,8 +40,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
index aab5cb6..1c781ec 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -40,8 +40,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index c4f1b5e..8024992 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -25,15 +25,14 @@
 namespace neuralnetworks {
 namespace V1_2 {
 
-using V1_0::IPreparedModel;
 using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
 
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -62,7 +61,7 @@
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
     ASSERT_EQ(nullptr, preparedModel.get());
 }
 
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index b663535..e2722aa 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -33,8 +33,8 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
@@ -68,7 +68,7 @@
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
+    *preparedModel = getPreparedModel_1_2(preparedModelCallback);
 
     // The getSupportedOperations_1_2 call returns a list of operations that are
     // guaranteed not to fail if prepareModel_1_2 is called, and
@@ -101,7 +101,8 @@
 
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
     ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+    Return<ErrorStatus> executeLaunchStatus =
+        preparedModel->execute_1_2(request, executionCallback);
     ASSERT_TRUE(executeLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
 
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 90a910c..4eced82 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -58,6 +58,12 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
+sp<IPreparedModel> getPreparedModel_1_2(
+    const sp<V1_2::implementation::PreparedModelCallback>& callback) {
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+    return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
 }  // namespace functional
 }  // namespace vts
 
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index a87d788..dedab8d 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -17,6 +17,8 @@
 #ifndef VTS_HAL_NEURALNETWORKS_V1_2_H
 #define VTS_HAL_NEURALNETWORKS_V1_2_H
 
+#include "Callbacks.h"
+
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.1/types.h>
 #include <android/hardware/neuralnetworks/1.2/IDevice.h>
@@ -77,6 +79,10 @@
 // Tag for the generated tests
 class GeneratedTest : public NeuralnetworksHidlTest {};
 
+// Utility function to get PreparedModel from callback and downcast to V1_2.
+sp<IPreparedModel> getPreparedModel_1_2(
+    const sp<V1_2::implementation::PreparedModelCallback>& callback);
+
 }  // namespace functional
 }  // namespace vts
 
diff --git a/radio/1.4/Android.bp b/radio/1.4/Android.bp
index c2ba47e..6257112 100644
--- a/radio/1.4/Android.bp
+++ b/radio/1.4/Android.bp
@@ -35,6 +35,8 @@
         "LteVopsInfo",
         "NetworkScanResult",
         "PhysicalChannelConfig",
+        "RadioAccessFamily",
+        "RadioCapability",
         "RadioFrequencyInfo",
         "RadioTechnology",
         "NrIndicators",
diff --git a/radio/1.4/IRadio.hal b/radio/1.4/IRadio.hal
index 8854453..c7a2c6b 100644
--- a/radio/1.4/IRadio.hal
+++ b/radio/1.4/IRadio.hal
@@ -22,6 +22,7 @@
 import @1.4::AccessNetwork;
 import @1.4::DataProfileInfo;
 import @1.4::EmergencyServiceCategory;
+import @1.4::RadioAccessFamily;
 
 /**
  * This interface is used by telephony and telecom to talk to cellular radio.
@@ -122,4 +123,24 @@
      */
     oneway emergencyDial(int32_t serial, Dial dialInfo,
             bitfield<EmergencyServiceCategory> categories);
+
+    /**
+     * Query the preferred network type bitmap.
+     *
+     * @param serial Serial number of request.
+     *
+     * Response callback is IRadioResponse.getPreferredNetworkTypeBitmapResponse()
+     */
+    oneway getPreferredNetworkTypeBitmap(int32_t serial);
+
+    /**
+     * Requests to set the preferred network type for searching and registering.
+     *
+     * @param serial Serial number of request.
+     * @param networkTypeBitmap a 32-bit bitmap of RadioAccessFamily.
+     *
+     * Response callback is IRadioResponse.setPreferredNetworkTypeBitmapResponse()
+     */
+    oneway setPreferredNetworkTypeBitmap(
+            int32_t serial, bitfield<RadioAccessFamily> networkTypeBitmap);
 };
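Illustrative caller side of the new requests (the radio proxy and serial values are placeholders):

    using ::android::hardware::hidl_bitfield;
    using ::android::hardware::radio::V1_4::RadioAccessFamily;

    // Compose a RadioAccessFamily bitmap and send it; the result comes back on
    // IRadioResponse::setPreferredNetworkTypeBitmapResponse().
    hidl_bitfield<RadioAccessFamily> raf =
            static_cast<hidl_bitfield<RadioAccessFamily>>(RadioAccessFamily::LTE) |
            static_cast<hidl_bitfield<RadioAccessFamily>>(RadioAccessFamily::NR);
    radio->setPreferredNetworkTypeBitmap(serial, raf);
    radio->getPreferredNetworkTypeBitmap(serial + 1);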
diff --git a/radio/1.4/IRadioResponse.hal b/radio/1.4/IRadioResponse.hal
index d971850..d9a2bad 100644
--- a/radio/1.4/IRadioResponse.hal
+++ b/radio/1.4/IRadioResponse.hal
@@ -87,4 +87,44 @@
      *   RadioError:REQUEST_NOT_SUPPORTED
      */
     oneway getIccCardStatusResponse_1_4(RadioResponseInfo info, CardStatus cardStatus);
-};
\ No newline at end of file
+
+    /**
+     * @param info Response info struct containing response type, serial no. and error
+     * @param networkTypeBitmap a 32-bit bitmap of RadioAccessFamily.
+     *
+     * Valid errors returned:
+     *   RadioError:NONE
+     *   RadioError:RADIO_NOT_AVAILABLE
+     *   RadioError:NO_MEMORY
+     *   RadioError:INTERNAL_ERR
+     *   RadioError:SYSTEM_ERR
+     *   RadioError:INVALID_ARGUMENTS
+     *   RadioError:MODEM_ERR
+     *   RadioError:REQUEST_NOT_SUPPORTED
+     *   RadioError:NO_RESOURCES
+     *   RadioError:CANCELLED
+     */
+    oneway getPreferredNetworkTypeBitmapResponse(RadioResponseInfo info,
+            bitfield<RadioAccessFamily> networkTypeBitmap);
+
+    /**
+     * Callback of IRadio.setPreferredNetworkTypeBitmap(int, bitfield<RadioAccessFamily>)
+     *
+     * @param info Response info struct containing response type, serial no. and error
+     *
+     * Valid errors returned:
+     *   RadioError:NONE
+     *   RadioError:RADIO_NOT_AVAILABLE
+     *   RadioError:OPERATION_NOT_ALLOWED
+     *   RadioError:MODE_NOT_SUPPORTED
+     *   RadioError:NO_MEMORY
+     *   RadioError:INTERNAL_ERR
+     *   RadioError:SYSTEM_ERR
+     *   RadioError:INVALID_ARGUMENTS
+     *   RadioError:MODEM_ERR
+     *   RadioError:REQUEST_NOT_SUPPORTED
+     *   RadioError:NO_RESOURCES
+     *   RadioError:CANCELLED
+     */
+    oneway setPreferredNetworkTypeBitmapResponse(RadioResponseInfo info);
+};
diff --git a/radio/1.4/types.hal b/radio/1.4/types.hal
index 4e5b288..9c4f08c 100644
--- a/radio/1.4/types.hal
+++ b/radio/1.4/types.hal
@@ -22,6 +22,8 @@
 import @1.0::DataProfileId;
 import @1.0::DataProfileInfoType;
 import @1.0::RadioAccessFamily;
+import @1.0::RadioCapabilityPhase;
+import @1.0::RadioCapabilityStatus;
 import @1.0::RadioError;
 import @1.0::RadioTechnology;
 import @1.0::RegState;
@@ -150,6 +152,11 @@
     NR = 20,
 };
 
+enum RadioAccessFamily : @1.0::RadioAccessFamily {
+    /** 5G NR. */
+    NR = 1 << RadioTechnology:NR,
+};
+
 /** Mapping the frequency to a rough range. */
 enum FrequencyRange : int32_t {
     /** Indicates the frequency range is below 1GHz. */
@@ -420,3 +427,22 @@
      */
     string eid;
 };
+
+/** Overwritten from @1.0::RadioCapability in order to use the latest RadioAccessFamily. */
+struct RadioCapability {
+    /** Unique session value defined by the framework, returned in all "responses/unsol". */
+    int32_t session;
+
+    RadioCapabilityPhase phase;
+
+    /** 32-bit bitmap of RadioAccessFamily. */
+    bitfield<RadioAccessFamily> raf;
+
+    /**
+     * A UUID typically "com.xxxx.lmX" where X is the logical modem.
+     * RadioConst:MAX_UUID_LENGTH is the max length.
+     */
+    string logicalModemUuid;
+
+    RadioCapabilityStatus status;
+};
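The new NR family bit follows the same 1 << RadioTechnology convention as the 1.0 entries, so with RadioTechnology::NR == 20 it lands on bit 20 (0x100000). A quick illustrative check against a capability's raf field:

    // RadioTechnology::NR is 20, so the NR radio access family is bit 20.
    static_assert(static_cast<int32_t>(RadioAccessFamily::NR) == (1 << 20),
                  "NR family bit is derived from RadioTechnology::NR");

    bool supportsNr(const RadioCapability& cap) {
        return (cap.raf &
                static_cast<hidl_bitfield<RadioAccessFamily>>(RadioAccessFamily::NR)) != 0;
    }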