NNAPI HAL: Remove priority from prepareModelFromCache_1_3

prepareModelFromCache_1_3 takes a model cache and a data cache that
together represent a prepared model. Any argument that meaningfully
contributes to the prepared model is implicitly captured by those
caches. For example, "model" and "executionPreference" appear in
prepareModel_1_3 but not in prepareModelFromCache_1_3 because they are
already reflected in the model cache and data cache. Because "priority"
can likewise affect the resulting prepared model, it too is implicitly
captured by the caches, so it is removed from
prepareModelFromCache_1_3.

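For illustration only (nothing below is part of this change), the
client-visible flow is now: priority is supplied once to
prepareModel_1_3 and is then carried implicitly by the caches, so
re-preparing from cache needs only the deadline, the caches, and the
token. The prepareModel_1_3 argument order and the helper shown here
are assumed for the sketch, not taken from this patch.

  // Sketch only; not part of this change. The prepareModel_1_3 argument
  // order is assumed and not confirmed by this patch.
  #include <android/hardware/neuralnetworks/1.3/IDevice.h>

  using android::sp;
  using android::hardware::hidl_array;
  using android::hardware::hidl_handle;
  using android::hardware::hidl_vec;
  using namespace android::hardware::neuralnetworks;

  void compileThenRecompileFromCache(
          const sp<V1_3::IDevice>& device, const V1_3::Model& model,
          V1_1::ExecutionPreference preference, V1_3::Priority priority,
          const V1_3::OptionalTimePoint& deadline,
          const hidl_vec<hidl_handle>& modelCache,
          const hidl_vec<hidl_handle>& dataCache,
          const hidl_array<uint8_t, 32>& token,  // BYTE_SIZE_OF_CACHE_TOKEN
          const sp<V1_3::IPreparedModelCallback>& callback) {
      // First compilation: priority is passed here, so any effect it has
      // on the prepared model ends up in the caches the driver writes.
      auto launchStatus = device->prepareModel_1_3(
              model, preference, priority, deadline, modelCache, dataCache,
              token, callback);
      if (!launchStatus.isOk()) return;

      // Re-preparation from cache: after this change there is no priority
      // parameter; it is carried implicitly by the caches written above.
      auto relaunchStatus = device->prepareModelFromCache_1_3(
              deadline, modelCache, dataCache, token, callback);
      if (!relaunchStatus.isOk()) return;
  }
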
Fixes: 148802784
Test: mma
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I518e493ead8aa97220b16370cce8523b425c378c
diff --git a/current.txt b/current.txt
index 74c0cbb..f1c588d 100644
--- a/current.txt
+++ b/current.txt
@@ -663,7 +663,7 @@
df9c79c4fdde2821550c6d5c3d07f5ec0adfb1b702561ce543c906ddef698703 android.hardware.media.c2@1.1::IComponent
a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardware.media.c2@1.1::IComponentStore
65c16331e57f6dd68b3971f06f78fe9e3209afb60630c31705aa355f9a52bf0d android.hardware.neuralnetworks@1.3::IBuffer
-d1f382d14e1384b907d5bb5780df7f01934650d556fedbed2f15a90773c657d6 android.hardware.neuralnetworks@1.3::IDevice
+9b41dd49e2dcc2ecb4243d03f8421d72494ada5cf2945bff88f0019eeca56923 android.hardware.neuralnetworks@1.3::IDevice
4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
2fa3679ad7c94b5e88724adcd560c561041068a4ca565c63830e68101988746a android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
237b23b126a66f3432658020fed78cdd06ba6297459436fe6bae0ba753370833 android.hardware.neuralnetworks@1.3::IPreparedModel
diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal
index 610db79..4931539 100644
--- a/neuralnetworks/1.3/IDevice.hal
+++ b/neuralnetworks/1.3/IDevice.hal
@@ -260,11 +260,6 @@
* the model, the callback object must be invoked with the appropriate
* ErrorStatus value and nullptr for the IPreparedModel.
*
- * The model is prepared with a priority. This priority is relative to other
- * prepared models owned by the same client. Higher priority executions may
- * use more compute resources than lower priority executions, and may
- * preempt or starve lower priority executions.
- *
* prepareModelFromCache_1_3 can be called with an optional deadline. If the
* model is not able to be prepared before the provided deadline, the model
* preparation must be aborted, and either {@link
@@ -284,8 +279,6 @@
* used with different shapes of inputs on different (possibly concurrent)
* executions.
*
- * @param priority The priority of the prepared model relative to other
- * prepared models owned by the client.
* @param deadline The time by which the model must be prepared. If the
* model cannot be prepared by the deadline, the preparation must be
* aborted.
@@ -318,7 +311,7 @@
* met
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
- prepareModelFromCache_1_3(Priority priority, OptionalTimePoint deadline,
+ prepareModelFromCache_1_3(OptionalTimePoint deadline,
vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
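
Illustrative driver-side note (not part of this patch): a driver that
tailors its prepared model to the requested priority must now recover
that priority from its own cache contents rather than from an argument,
since the caches are the only place it is implicitly carried. The
sketch below is hypothetical; the cache layout and helper names are
invented, as the cache format is opaque and driver-defined.

  // Hypothetical driver-side helpers; the data cache layout shown here
  // is invented for illustration and is not mandated by the HAL.
  #include <android/hardware/neuralnetworks/1.3/types.h>

  #include <unistd.h>

  #include <cstdint>

  using android::hardware::neuralnetworks::V1_3::Priority;

  // Called during prepareModel_1_3: record the priority the model was
  // prepared with inside the driver's (opaque) data cache.
  bool writePriorityToDataCache(int dataCacheFd, Priority priority) {
      const uint8_t value = static_cast<uint8_t>(priority);
      return write(dataCacheFd, &value, 1) == 1;
  }

  // Called during prepareModelFromCache_1_3: recover the priority that
  // was in effect when the caches were written, now that it is no longer
  // a parameter.
  bool readPriorityFromDataCache(int dataCacheFd, Priority* priority) {
      uint8_t value = 0;
      if (read(dataCacheFd, &value, 1) != 1) return false;
      *priority = static_cast<Priority>(value);
      return true;
  }
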
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index 576e524..0bd24da 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -370,7 +370,7 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache_1_3(
- kDefaultPriority, {}, modelCache, dataCache, cacheToken, preparedModelCallback);
+ {}, modelCache, dataCache, cacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
*preparedModel = nullptr;