Add @V1_2::Capabilities to support all non-extension operand types.

Performance information in Capabilities is used by the runtime when it
selects the appropriate processor to distribute work to. Prior to this
CL, Capabilities could only distinguish between float and non-float
data types -- so, for example, float16 and float32 performance was
treated as identical, as was the performance of all non-float data
types.
Bug: 124041010
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_2TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Change-Id: I83fb5920c1c75afbd7750d793a0b8f3e72a0552c
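For context, below is a minimal sketch of how a runtime could consume the new
per-operand-type performance table. It is illustrative only and not part of
this CL: the standalone OperandType/PerformanceInfo/OperandPerformance
stand-ins and the lookup() helper are assumptions, but the sorted-by-type
ordering and the FLT_MAX fallback for missing entries follow the
operandPerformance comment added to types.hal in this change.

    // Illustrative only -- not part of this CL. Standalone stand-ins for the
    // HIDL-generated types so the sketch compiles on its own; enumerator
    // values are placeholders, not the real OperandType values.
    #include <algorithm>
    #include <cfloat>
    #include <vector>

    enum class OperandType { FLOAT32, INT32, TENSOR_FLOAT32 };

    struct PerformanceInfo {
        float execTime;    // relative execution time (lower is better)
        float powerUsage;  // relative power usage (lower is better)
    };

    struct OperandPerformance {
        OperandType type;
        PerformanceInfo info;
    };

    // operandPerformance is sorted by OperandType, so a binary search finds
    // the entry for a given type; a type that is absent falls back to
    // { FLT_MAX, FLT_MAX }, i.e. it is treated as the worst possible choice,
    // matching the types.hal comment in the diff below.
    PerformanceInfo lookup(const std::vector<OperandPerformance>& operandPerformance,
                           OperandType type) {
        auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
                                   [](const OperandPerformance& entry, OperandType t) {
                                       return entry.type < t;
                                   });
        if (it != operandPerformance.end() && it->type == type) {
            return it->info;
        }
        return PerformanceInfo{FLT_MAX, FLT_MAX};
    }

A driver would populate operandPerformance with one entry per operand type it
wants to advertise, keeping the vector sorted by type so lookups like the one
above stay cheap.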
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index da9a966..d83f9e6 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -76,6 +76,17 @@
getType() generates (ErrorStatus status, DeviceType type);
/**
+ * Gets the capabilities of a driver.
+ *
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities);
+
+ /**
* Gets information about extensions supported by the driver implementation.
*
* All extension operations and operands must be fully supported for the
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 28d0119..7691642 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -4618,6 +4618,39 @@
};
/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non-extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
+
+/**
* Describes one operation of the model's graph.
*/
struct Operation {
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 6fb16c2..5c269df 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -25,7 +25,7 @@
namespace vts {
namespace functional {
-using V1_1::Capabilities;
+using V1_0::PerformanceInfo;
// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -37,6 +37,31 @@
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ using OperandPerformance = Capabilities::OperandPerformance;
+ Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status,
+ const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+
+ auto isPositive = [](const PerformanceInfo& perf) {
+ return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
+ };
+
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto& opPerf = capabilities.operandPerformance;
+ EXPECT_TRUE(std::all_of(
+ opPerf.begin(), opPerf.end(),
+ [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
+ EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ }));
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+
// device version test
TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) {