HAL interface for compilation and execution hints

The following AIDL types are added:
 - TokenValuePair
 - PrepareModelConfig
 - ExecutionConfig

The following AIDL methods are added:
 - IDevice::prepareModelWithConfig
 - IPreparedModel::executeSynchronouslyWithConfig
 - IPreparedModel::executeFencedWithConfig
 - IBurst::executeSynchronouslyWithConfig

The compilation and execution hints are stored as a list of
token-value pairs inside the PrepareModelConfig / ExecutionConfig
parcelables. These parcelables are introduced so that future
extensions to the compilation- and execution-related interfaces are
easier to add.
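
For illustration, a minimal caller-side sketch against the updated
canonical C++ interface is shown below. The parameter order follows
the mock signatures in the diff; the hint token, payload, extension
name and prefix are made-up placeholders, and TokenValuePair is
assumed to carry an integer token plus a byte payload:

    #include <nnapi/IDevice.h>
    #include <nnapi/IPreparedModel.h>
    #include <nnapi/Types.h>

    #include <vector>

    namespace nn = ::android::nn;

    // Sketch only: assumes `device`, `model` and `request` already exist.
    void prepareAndExecuteWithHints(const nn::SharedDevice& device,
                                    const nn::Model& model,
                                    const nn::Request& request) {
        // Vendor-defined hint; the token and payload are placeholders.
        const std::vector<nn::TokenValuePair> hints = {
                {/*token=*/0x00010000, /*value=*/{1}}};
        const std::vector<nn::ExtensionNameAndPrefix> extensionNameToPrefix = {
                {/*name=*/"com.example.vendor_hint", /*prefix=*/0x0001}};

        // Compilation hints ride along with the existing prepareModel arguments.
        const auto preparedModel = device->prepareModel(
                model, nn::ExecutionPreference::FAST_SINGLE_ANSWER, nn::Priority::MEDIUM,
                /*deadline=*/{}, /*modelCache=*/{}, /*dataCache=*/{}, /*token=*/{},
                hints, extensionNameToPrefix);
        if (!preparedModel.has_value()) return;

        // Execution hints follow the same pattern on IPreparedModel::execute.
        const auto result = (*preparedModel)
                ->execute(request, nn::MeasureTiming::NO, /*deadline=*/{},
                          /*loopTimeoutDuration=*/{}, hints, extensionNameToPrefix);
        (void)result;
    }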

It is the driver's responsibility to validate the hints, and the
driver is allowed to ignore them.
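
A minimal driver-side sketch of that contract is given below; the
helper name and the recognized-token set are illustrative assumptions,
not part of the HAL, and TokenValuePair is again assumed to expose
token/value fields:

    #include <nnapi/Types.h>

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    namespace nn = ::android::nn;

    // Hypothetical driver helper: hints the driver does not recognize are
    // skipped, which the interface explicitly permits.
    void applyCompilationHints(const std::vector<nn::TokenValuePair>& hints) {
        static const std::unordered_set<int32_t> kRecognizedTokens = {
                0x00010000,  // placeholder vendor-defined token
        };
        for (const auto& hint : hints) {
            if (kRecognizedTokens.count(hint.token) == 0) {
                continue;  // Ignoring an unknown hint is allowed behavior.
            }
            // ... apply the vendor-specific setting encoded in hint.value ...
        }
    }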

Bug: 203248587
Test: neuralnetworks_utils_hal_aidl_test
Change-Id: I98240fd75089fc85cdfcaa0be28aab8a6f0dfca5
Merged-In: I98240fd75089fc85cdfcaa0be28aab8a6f0dfca5
(cherry picked from commit 0e671f3edb9d2c78658a4ef4169e3211e3f9bb00)
diff --git a/neuralnetworks/utils/common/test/MockDevice.h b/neuralnetworks/utils/common/test/MockDevice.h
index a9428bc..a0fc5c3 100644
--- a/neuralnetworks/utils/common/test/MockDevice.h
+++ b/neuralnetworks/utils/common/test/MockDevice.h
@@ -39,7 +39,9 @@
     MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModel,
                 (const Model& model, ExecutionPreference preference, Priority priority,
                  OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
-                 const std::vector<SharedHandle>& dataCache, const CacheToken& token),
+                 const std::vector<SharedHandle>& dataCache, const CacheToken& token,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModelFromCache,
                 (OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index c8ce006..b8613b2 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -27,17 +27,23 @@
   public:
     MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), execute,
                 (const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
-                 const OptionalDuration& loopTimeoutDuration),
+                 const OptionalDuration& loopTimeoutDuration,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), executeFenced,
                 (const Request& request, const std::vector<SyncFence>& waitFor,
                  MeasureTiming measure, const OptionalTimePoint& deadline,
                  const OptionalDuration& loopTimeoutDuration,
-                 const OptionalDuration& timeoutDurationAfterFence),
+                 const OptionalDuration& timeoutDurationAfterFence,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
-                (const nn::Request& request, nn::MeasureTiming measure,
-                 const nn::OptionalDuration& loopTimeoutDuration),
+                (const Request& request, MeasureTiming measure,
+                 const OptionalDuration& loopTimeoutDuration,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
     MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
diff --git a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
index 0488b63..d9b8505 100644
--- a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
@@ -309,12 +309,12 @@
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
     const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(mockPreparedModel));
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -324,12 +324,12 @@
 TEST(ResilientDeviceTest, prepareModelError) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -339,13 +339,13 @@
 TEST(ResilientDeviceTest, prepareModelDeadObjectFailedRecovery) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -355,18 +355,18 @@
 TEST(ResilientDeviceTest, prepareModelDeadObjectSuccessfulRecovery) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     const auto recoveredMockDevice = createConfiguredMockDevice();
     const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>();
-    EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(mockPreparedModel));
     EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(Return(recoveredMockDevice));
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -679,7 +679,7 @@
     device->recover(mockDevice.get(), /*blocking=*/false);
 
     // run test
-    auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
index d396ca8..276bfba 100644
--- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
@@ -104,12 +104,12 @@
 TEST(ResilientPreparedModelTest, execute) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoExecutionError));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -119,10 +119,12 @@
 TEST(ResilientPreparedModelTest, executeError) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -132,12 +134,12 @@
 TEST(ResilientPreparedModelTest, executeDeadObjectFailedRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
     constexpr auto ret = [] { return nn::error(nn::ErrorStatus::GENERAL_FAILURE); };
     EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(ret);
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -147,9 +149,9 @@
 TEST(ResilientPreparedModelTest, executeDeadObjectSuccessfulRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
     const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel();
-    EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _))
+    EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoExecutionError));
     EXPECT_CALL(*mockPreparedModelFactory, Call())
@@ -157,7 +159,7 @@
             .WillOnce(Return(recoveredMockPreparedModel));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -167,12 +169,12 @@
 TEST(ResilientPreparedModelTest, executeFenced) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoFencedExecutionError));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -182,12 +184,12 @@
 TEST(ResilientPreparedModelTest, executeFencedError) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -197,13 +199,13 @@
 TEST(ResilientPreparedModelTest, executeFencedDeadObjectFailedRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -213,11 +215,11 @@
 TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel();
-    EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoFencedExecutionError));
     EXPECT_CALL(*mockPreparedModelFactory, Call())
@@ -225,7 +227,7 @@
             .WillOnce(Return(recoveredMockPreparedModel));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -235,12 +237,12 @@
 TEST(ResilientPreparedModelTest, createReusableExecution) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoCreateReusableExecutionError));
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -250,12 +252,12 @@
 TEST(ResilientPreparedModelTest, createReusableExecutionError) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());