HAL interface for compilation and execution hints
The following AIDL types are added:
- TokenValuePair
- PrepareModelConfig
- ExecutionConfig
The following AIDL methods are added:
- IDevice::prepareModelWithConfig
- IPreparedModel::executeSynchronouslyWithConfig
- IPreparedModel::executeFencedWithConfig
- IBurst::executeSynchronouslyWithConfig
The compilation and execution hints are stored as a list of
token-value pairs inside PrepareModelConfig / ExecutionConfig. The
PrepareModelConfig / ExecutionConfig parcelables are introduced to make
future extensions of the compilation- and execution-related interfaces
easier.
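
As a rough sketch only: the field names and ordering below are inferred
from the VTS call sites in this change (e.g. the brace-initializers
passed to createReusableExecution and prepareModelWithConfig), not
copied from the checked-in .aidl files, so details may differ.

    // Illustrative sketch; names inferred from the test call sites below.
    parcelable TokenValuePair {
        int token;
        byte[] value;
    }

    parcelable ExecutionConfig {
        boolean measureTiming;
        long loopTimeoutDurationNs;
        TokenValuePair[] executionHints;
        ExtensionNameAndPrefix[] extensionNameToPrefix;
    }

    parcelable PrepareModelConfig {
        ExecutionPreference preference;
        Priority priority;
        long deadlineNs;
        ParcelFileDescriptor[] modelCache;
        ParcelFileDescriptor[] dataCache;
        byte[] cacheToken;
        TokenValuePair[] compilationHints;
        ExtensionNameAndPrefix[] extensionNameToPrefix;
    }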
It is the driver's responsibility to validate the hints, and the driver
is free to ignore them.
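
For reference, the call sites and mock overrides in the VTS changes
below imply signatures along these lines for the four new methods
listed above (again a sketch, not copied from the .aidl files):

    // IDevice
    void prepareModelWithConfig(in Model model, in PrepareModelConfig config,
                                in IPreparedModelCallback callback);

    // IPreparedModel
    ExecutionResult executeSynchronouslyWithConfig(in Request request,
                                                   in ExecutionConfig config,
                                                   in long deadlineNs);
    FencedExecutionResult executeFencedWithConfig(in Request request,
                                                  in ParcelFileDescriptor[] waitFor,
                                                  in ExecutionConfig config,
                                                  in long deadlineNs,
                                                  in long durationNs);

    // IBurst
    ExecutionResult executeSynchronouslyWithConfig(in Request request,
                                                   in long[] memoryIdentifierTokens,
                                                   in ExecutionConfig config,
                                                   in long deadlineNs);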
Bug: 203248587
Test: neuralnetworks_utils_hal_aidl_test
Change-Id: I98240fd75089fc85cdfcaa0be28aab8a6f0dfca5
Merged-In: I98240fd75089fc85cdfcaa0be28aab8a6f0dfca5
(cherry picked from commit 0e671f3edb9d2c78658a4ef4169e3211e3f9bb00)
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 2460fba..8c8a87a 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -63,6 +63,8 @@
// it is skipped. The field is set to true by default and is set to false in
// quantization coupling tests to suppress skipping a test
bool reportSkipping;
+ // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
+ bool useConfig;
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
bool reusable)
: executor(executor),
@@ -70,7 +72,8 @@
outputType(outputType),
memoryType(memoryType),
reusable(reusable),
- reportSkipping(true) {}
+ reportSkipping(true),
+ useConfig(false) {}
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
bool reusable, bool reportSkipping)
: executor(executor),
@@ -78,7 +81,17 @@
outputType(outputType),
memoryType(memoryType),
reusable(reusable),
- reportSkipping(reportSkipping) {}
+ reportSkipping(reportSkipping),
+ useConfig(false) {}
+ TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+ bool reusable, bool reportSkipping, bool useConfig)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ memoryType(memoryType),
+ reusable(reusable),
+ reportSkipping(reportSkipping),
+ useConfig(useConfig) {}
};
std::string toString(OutputType type) {
@@ -100,7 +113,8 @@
<< ", .measureTiming=" << (config.measureTiming ? "true" : "false")
<< ", .outputType=" << toString(config.outputType)
<< ", .memoryType=" << toString(config.memoryType)
- << ", .reusable=" << (config.reusable ? "true" : "false") << "}";
+ << ", .reusable=" << (config.reusable ? "true" : "false")
+ << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
return ss.str();
}
@@ -587,8 +601,8 @@
std::shared_ptr<IExecution> execution;
if (testConfig.reusable) {
- const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
- loopTimeoutDurationNs, &execution);
+ const auto ret = preparedModel->createReusableExecution(
+ request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
ASSERT_NE(nullptr, execution.get());
}
@@ -607,6 +621,10 @@
::ndk::ScopedAStatus ret;
if (testConfig.reusable) {
ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeSynchronouslyWithConfig(
+ request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, &executionResult);
} else {
ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
kNoDeadline, loopTimeoutDurationNs,
@@ -649,9 +667,16 @@
ExecutionResult executionResult;
// execute
- ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs,
- &executionResult);
+ if (testConfig.useConfig) {
+ ret = burst->executeSynchronouslyWithConfig(
+ request, slots,
+ {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
+ &executionResult);
+ } else {
+ ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+ kNoDeadline, loopTimeoutDurationNs,
+ &executionResult);
+ }
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
if (ret.isOk()) {
@@ -680,6 +705,10 @@
::ndk::ScopedAStatus ret;
if (testConfig.reusable) {
ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeFencedWithConfig(
+ request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, kNoDuration, &executionResult);
} else {
ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
kNoDeadline, loopTimeoutDurationNs,
@@ -697,9 +726,19 @@
waitFor.emplace_back(dupFd);
// If a sync fence is returned, try start another run waiting for the sync
// fence.
- ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs,
- kNoDuration, &executionResult);
+ if (testConfig.reusable) {
+ ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
+ &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeFencedWithConfig(
+ request, waitFor,
+ {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, kNoDuration, &executionResult);
+ } else {
+ ret = preparedModel->executeFenced(
+ request, waitFor, testConfig.measureTiming, kNoDeadline,
+ loopTimeoutDurationNs, kNoDuration, &executionResult);
+ }
ASSERT_TRUE(ret.isOk());
waitForSyncFence(executionResult.syncFence.get());
}
@@ -830,11 +869,13 @@
std::vector<Executor> executorList;
std::vector<MemoryType> memoryTypeList;
std::vector<bool> reusableList = {false};
+ std::vector<bool> useConfigList = {false};
int deviceVersion;
ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
if (deviceVersion >= kMinAidlLevelForFL8) {
reusableList.push_back(true);
+ useConfigList.push_back(true);
}
switch (testKind) {
@@ -879,11 +920,14 @@
for (const Executor executor : executorList) {
for (const MemoryType memoryType : memoryTypeList) {
for (const bool reusable : reusableList) {
- if (executor == Executor::BURST && reusable) continue;
- const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
- reusable);
- SCOPED_TRACE(toString(testConfig));
- EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ for (const bool useConfig : useConfigList) {
+ if ((useConfig || executor == Executor::BURST) && reusable) continue;
+ const TestConfig testConfig(executor, measureTiming, outputType,
+ memoryType, reusable,
+ /*reportSkipping=*/true, useConfig);
+ SCOPED_TRACE(toString(testConfig));
+ EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ }
}
}
}
@@ -942,6 +986,13 @@
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+ int32_t deviceVersion;
+ ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+ if (deviceVersion >= kMinAidlLevelForFL8) {
+ createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ true,
+ /*useConfig*/ true);
+ EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+ }
} break;
case TestKind::QUANTIZATION_COUPLING: {
ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index b3e9c63..97760ae 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -204,11 +204,23 @@
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
+ ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request&, const ExecutionConfig&,
+ int64_t, ExecutionResult*) override {
+ return ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+ }
+ ndk::ScopedAStatus executeFencedWithConfig(const Request&,
+ const std::vector<ndk::ScopedFileDescriptor>&,
+ const ExecutionConfig&, int64_t, int64_t,
+ FencedExecutionResult*) override {
+ return ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+ }
ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
- ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, bool, int64_t,
+ ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, const ExecutionConfig&,
std::shared_ptr<aidl_hal::IExecution>*) override {
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index fdc7eff..931ba25 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -77,6 +77,28 @@
ASSERT_EQ(nullptr, preparedModel.get());
}
+static void validatePrepareModelWithConfig(const std::shared_ptr<IDevice>& device,
+ const std::string& message, const Model& model,
+ ExecutionPreference preference, Priority priority) {
+ SCOPED_TRACE(message + " [prepareModelWithConfig]");
+
+ std::shared_ptr<PreparedModelCallback> preparedModelCallback =
+ ndk::SharedRefBase::make<PreparedModelCallback>();
+ const auto prepareLaunchStatus = device->prepareModelWithConfig(
+ model, {preference, priority, kNoDeadline, {}, {}, kEmptyCacheToken, {}, {}},
+ preparedModelCallback);
+ ASSERT_FALSE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
+
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+ std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ ASSERT_EQ(nullptr, preparedModel.get());
+}
+
static bool validExecutionPreference(ExecutionPreference preference) {
return preference == ExecutionPreference::LOW_POWER ||
preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
@@ -103,6 +125,13 @@
}
validatePrepareModel(device, message, model, preference, priority);
+
+ int32_t aidlVersion;
+ ASSERT_TRUE(device->getInterfaceVersion(&aidlVersion).isOk());
+ if (aidlVersion >= kMinAidlLevelForFL8) {
+ // prepareModelWithConfig must satisfy all requirements enforced by prepareModel.
+ validatePrepareModelWithConfig(device, message, model, preference, priority);
+ }
}
static uint32_t addOperand(Model* model) {
diff --git a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
index e8debf7..d749841 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
@@ -45,7 +45,7 @@
{
SCOPED_TRACE(message + " [createReusableExecution]");
const auto createStatus = preparedModel->createReusableExecution(
- request, measure, kOmittedTimeoutDuration, &execution);
+ request, {measure, kOmittedTimeoutDuration, {}, {}}, &execution);
if (!createStatus.isOk()) {
ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
@@ -149,10 +149,59 @@
int32_t aidlVersion;
ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
+ if (aidlVersion < kMinAidlLevelForFL8) {
+ return;
+ }
// validate reusable execution
- if (aidlVersion >= kMinAidlLevelForFL8) {
- validateReusableExecution(preparedModel, message, request, measure);
+ validateReusableExecution(preparedModel, message, request, measure);
+
+ // synchronous with empty hints
+ {
+ SCOPED_TRACE(message + " [executeSynchronouslyWithConfig]");
+ ExecutionResult executionResult;
+ const auto executeStatus = preparedModel->executeSynchronouslyWithConfig(
+ request, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, &executionResult);
+ ASSERT_FALSE(executeStatus.isOk());
+ ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
+ }
+
+ // fenced with empty hints
+ {
+ SCOPED_TRACE(message + " [executeFencedWithConfig]");
+ FencedExecutionResult executionResult;
+ const auto executeStatus = preparedModel->executeFencedWithConfig(
+ request, {}, {false, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, kNoDuration,
+ &executionResult);
+ ASSERT_FALSE(executeStatus.isOk());
+ ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
+ }
+
+ // burst with empty hints
+ {
+ SCOPED_TRACE(message + " [burst executeSynchronouslyWithConfig]");
+
+ // create burst
+ std::shared_ptr<IBurst> burst;
+ auto ret = preparedModel->configureExecutionBurst(&burst);
+ ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ ASSERT_NE(nullptr, burst.get());
+
+ // use -1 for all memory identifier tokens
+ const std::vector<int64_t> slots(request.pools.size(), -1);
+
+ ExecutionResult executionResult;
+ const auto executeStatus = burst->executeSynchronouslyWithConfig(
+ request, slots, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline,
+ &executionResult);
+ ASSERT_FALSE(executeStatus.isOk());
+ ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
}
}
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
index c417356..ad93e6d 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
@@ -41,7 +41,8 @@
// internal helper function
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
- std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping) {
+ std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping,
+ bool useConfig) {
ASSERT_NE(nullptr, preparedModel);
*preparedModel = nullptr;
@@ -56,11 +57,25 @@
// launch prepare model
const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
ndk::SharedRefBase::make<PreparedModelCallback>();
- const auto prepareLaunchStatus =
- device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
- kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
-
+ if (useConfig) {
+ const auto prepareLaunchStatus =
+ device->prepareModelWithConfig(model,
+ {ExecutionPreference::FAST_SINGLE_ANSWER,
+ kDefaultPriority,
+ kNoDeadline,
+ {},
+ {},
+ kEmptyCacheToken,
+ {},
+ {}},
+ preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+ } else {
+ const auto prepareLaunchStatus = device->prepareModel(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline, {},
+ {}, kEmptyCacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+ }
// retrieve prepared model
preparedModelCallback->wait();
const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
index a900590..00d705c 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
@@ -51,8 +51,8 @@
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
- std::shared_ptr<IPreparedModel>* preparedModel,
- bool reportSkipping = true);
+ std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping = true,
+ bool useConfig = false);
enum class Executor { SYNC, BURST, FENCED };