Add HAL entry to allow querying the device type

Bug: 111425781
Bug: 112661050
Test: mm
Test: NeuralNetworksTest_static
Change-Id: Iced35c24eb2905c10fbf86d55319fdb5c7488703
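
A 1.2 driver must now implement the added method. A minimal driver-side sketch
is shown below; the "SampleDriver" class name is hypothetical and the reported
type is only an example, so treat this as an illustration of the callback
contract rather than code from this change.

    // Illustrative HIDL service implementation of the new method (assumes the
    // generated 1.2 IDevice headers and the usual using-declarations; the
    // ErrorStatus type is the 1.0 enum reused by the 1.2 interface).
    Return<void> SampleDriver::getType(getType_cb cb) {
        // A CPU-backed reference driver reports CPU; a dedicated ML
        // accelerator would report DeviceType::ACCELERATOR instead.
        cb(ErrorStatus::NONE, DeviceType::CPU);
        return Void();
    }
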
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index 6a77961..6c3b483 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -56,6 +56,26 @@
     getVersionString() generates (ErrorStatus status, string version);
 
     /**
+     * Get the type of a given device.
+     *
+     * The device type can be used to help application developers distribute
+     * Machine Learning workloads and other workloads such as graphical rendering.
+     * For example, for an app which renders AR scenes based on real-time object
+     * detection results, the developer could choose an ACCELERATOR type device
+     * for ML workloads, and reserve the GPU for graphical rendering.
+     *
+     * @return status Error status returned from querying the device type. Must be:
+     *                - NONE if the query was successful
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if the query resulted in an
+     *                  unspecified error
+     * @return type The DeviceType of the device. Note that this is not a
+     *              bitfield of DeviceTypes; each device must have exactly one
+     *              type.
+     */
+    getType() generates (ErrorStatus status, DeviceType type);
+
+    /**
      * Gets the supported operations in a model.
      *
      * getSupportedOperations indicates which operations of a model are fully
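
The doc comment above describes the intended application-level use of the
device type. At the NDK level, device enumeration plus the new type query would
look roughly like the sketch below; the ANeuralNetworks_* entry points and
ANEURALNETWORKS_DEVICE_* constants named here are the expected public
counterparts of this HAL method and are shown for illustration only, not as
part of this change.

    // Illustrative NDK-side usage: pick a dedicated ML accelerator if one is
    // present, so the GPU stays free for rendering (error handling minimal).
    #include <android/NeuralNetworks.h>

    const ANeuralNetworksDevice* pickAccelerator() {
        uint32_t count = 0;
        if (ANeuralNetworks_getDeviceCount(&count) != ANEURALNETWORKS_NO_ERROR) {
            return nullptr;
        }
        for (uint32_t i = 0; i < count; ++i) {
            ANeuralNetworksDevice* device = nullptr;
            int32_t type = 0;
            if (ANeuralNetworks_getDevice(i, &device) == ANEURALNETWORKS_NO_ERROR &&
                ANeuralNetworksDevice_getType(device, &type) == ANEURALNETWORKS_NO_ERROR &&
                type == ANEURALNETWORKS_DEVICE_ACCELERATOR) {
                return device;
            }
        }
        return nullptr;  // No dedicated accelerator; fall back to default selection.
    }
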
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index b072793..564cd8c 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -180,6 +180,26 @@
 };
 
 /**
+ * Device types.
+ *
+ * The type of NNAPI device.
+ */
+enum DeviceType : int32_t {
+    // Leaving 0 unused as it means unknown type in NDK NNAPI. There is no
+    // HAL equivalent of unknown type and a 1.2 HAL implementation must belong
+    // to one of the categories below.
+    /** The device does not fall into any category below. */
+    OTHER             = 1,
+    /** The device runs NNAPI models on single or multi-core CPU. */
+    CPU               = 2,
+    /** The device can run NNAPI models and also accelerate graphics APIs such
+     * as OpenGL ES and Vulkan. */
+    GPU               = 3,
+    /** Dedicated accelerator for Machine Learning workloads. */
+    ACCELERATOR       = 4,
+};
+
+/**
  * Describes one operation of the model's graph.
  */
 struct Operation {
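
The "Leaving 0 unused" note above exists because the NDK reserves 0 for an
unknown device type (for example, a pre-1.2 driver that cannot report one),
while a 1.2 driver must pick one of the values 1-4. The public constants are
expected to mirror the HAL values, so a runtime could surface them roughly as
in the hypothetical helper below; this only illustrates the numbering
relationship and is not code from this change.

    // Illustrative mapping from the HAL enum to the NDK-visible value.
    // Assumes "using ::android::hardware::neuralnetworks::V1_2::DeviceType;".
    #include <cstdint>

    int32_t toNdkDeviceType(DeviceType type) {
        switch (type) {
            case DeviceType::OTHER:        // 1
            case DeviceType::CPU:          // 2
            case DeviceType::GPU:          // 3
            case DeviceType::ACCELERATOR:  // 4
                return static_cast<int32_t>(type);
            default:
                return 0;  // Unknown type; not representable in the 1.2 HAL.
        }
    }
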
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index eb3ebd3..8c3ad15 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -45,6 +45,16 @@
     });
     EXPECT_TRUE(ret.isOk());
 }
+
+// device type test
+TEST_F(NeuralnetworksHidlTest, GetDeviceTypeTest) {
+    Return<void> ret = device->getType([](ErrorStatus status, DeviceType type) {
+        EXPECT_EQ(ErrorStatus::NONE, status);
+        EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
+                    type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
+    });
+    EXPECT_TRUE(ret.isOk());
+}
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_2