/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
Xusong Wang7cc0ccc2019-04-23 14:28:17 -070019#include <android-base/logging.h>
20#include <android/hidl/memory/1.0/IMemory.h>
21#include <ftw.h>
22#include <gtest/gtest.h>
23#include <hidlmemory/mapping.h>
24#include <unistd.h>
25
26#include <cstdio>
27#include <cstdlib>
28#include <random>
Xusong Wang96e68dc2019-01-18 17:28:26 -080029
30#include "Callbacks.h"
31#include "GeneratedTestHarness.h"
32#include "TestHarness.h"
33#include "Utils.h"
Xusong Wang7cc0ccc2019-04-23 14:28:17 -070034#include "VtsHalNeuralnetworks.h"
Xusong Wang96e68dc2019-01-18 17:28:26 -080035
36namespace android {
37namespace hardware {
38namespace neuralnetworks {
39namespace V1_2 {
40namespace vts {
41namespace functional {
42
43using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
44using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
45using ::android::nn::allocateSharedMemory;
46using ::test_helper::MixedTypedExample;
47
48namespace {
49
// In frameworks/ml/nn/runtime/test/generated/, creates a hidl model of mobilenet.
Xusong Wang96e68dc2019-01-18 17:28:26 -080051#include "examples/mobilenet_224_gender_basic_fixed.example.cpp"
52#include "vts_models/mobilenet_224_gender_basic_fixed.model.cpp"
53
// Prevent the compiler from complaining about an otherwise unused function.
// The generated headers above define both the static-shape and dynamic-output-shape
// variants; only the static-shape ones are exercised in this file.
[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
57
// How cache files should be opened by createCacheHandles (maps to open(2) flags).
enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
Xusong Wang96e68dc2019-01-18 17:28:26 -080059
Xusong Wanged0822b2019-02-25 16:58:58 -080060// Creates cache handles based on provided file groups.
61// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
62void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups,
63 const std::vector<AccessMode>& mode, hidl_vec<hidl_handle>* handles) {
64 handles->resize(fileGroups.size());
65 for (uint32_t i = 0; i < fileGroups.size(); i++) {
66 std::vector<int> fds;
67 for (const auto& file : fileGroups[i]) {
68 int fd;
69 if (mode[i] == AccessMode::READ_ONLY) {
70 fd = open(file.c_str(), O_RDONLY);
71 } else if (mode[i] == AccessMode::WRITE_ONLY) {
72 fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
73 } else if (mode[i] == AccessMode::READ_WRITE) {
74 fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
75 } else {
76 FAIL();
77 }
78 ASSERT_GE(fd, 0);
79 fds.push_back(fd);
Xusong Wang96e68dc2019-01-18 17:28:26 -080080 }
Xusong Wanged0822b2019-02-25 16:58:58 -080081 native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
82 ASSERT_NE(cacheNativeHandle, nullptr);
83 std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]);
84 (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true);
Xusong Wang96e68dc2019-01-18 17:28:26 -080085 }
Xusong Wanged0822b2019-02-25 16:58:58 -080086}
87
88void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups, AccessMode mode,
89 hidl_vec<hidl_handle>* handles) {
90 createCacheHandles(fileGroups, std::vector<AccessMode>(fileGroups.size(), mode), handles);
Xusong Wang96e68dc2019-01-18 17:28:26 -080091}
92
// Create a chain of broadcast operations. The second operand is always constant tensor [1].
// For simplicity, activation scalar is shared. The second operand is not shared
// in the model to let driver maintain a non-trivial size of constant data and the corresponding
// data locations in cache.
//
//                    --------- activation ---------
//                    ↓      ↓      ↓             ↓
// E.g. input -> ADD -> ADD -> ADD -> ... -> ADD -> output
//                    ↑      ↑      ↑             ↑
//                   [1]    [1]    [1]           [1]
//
// Operand layout: index 0 is the shared activation scalar; for operation i, indices
// (2i+1, 2i+2) are its two inputs and (2i+3) its output; the last operand is the model output.
Model createLargeTestModel(OperationType op, uint32_t len) {
    // Model operations and operands.
    std::vector<Operation> operations(len);
    std::vector<Operand> operands(len * 2 + 2);

    // The constant buffer pool. This contains the activation scalar, followed by the
    // per-operation constant operands.
    std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(float));

    // The activation scalar, value = 0.
    operands[0] = {
            .type = OperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = len,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
    };
    memset(operandValues.data(), 0, sizeof(int32_t));

    const float floatBufferValue = 1.0f;
    for (uint32_t i = 0; i < len; i++) {
        const uint32_t firstInputIndex = i * 2 + 1;
        const uint32_t secondInputIndex = firstInputIndex + 1;
        const uint32_t outputIndex = secondInputIndex + 1;

        // The first operation input.
        operands[firstInputIndex] = {
                .type = OperandType::TENSOR_FLOAT32,
                .dimensions = {1},
                .numberOfConsumers = 1,
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
                                    : OperandLifeTime::TEMPORARY_VARIABLE),
                .location = {},
        };

        // The second operation input, value = 1.
        operands[secondInputIndex] = {
                .type = OperandType::TENSOR_FLOAT32,
                .dimensions = {1},
                .numberOfConsumers = 1,
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = OperandLifeTime::CONSTANT_COPY,
                // Each constant gets its own slot in the pool, after the activation scalar.
                .location = {.poolIndex = 0,
                             .offset = static_cast<uint32_t>(i * sizeof(float) + sizeof(int32_t)),
                             .length = sizeof(float)},
        };
        memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(float), &floatBufferValue,
               sizeof(float));

        // The operation. All operations share the same activation scalar.
        // The output operand is created as an input in the next iteration of the loop, in the case
        // of all but the last member of the chain; and after the loop as a model output, in the
        // case of the last member of the chain.
        operations[i] = {
                .type = op,
                .inputs = {firstInputIndex, secondInputIndex, /*activation scalar*/ 0},
                .outputs = {outputIndex},
        };
    }

    // The model output.
    operands.back() = {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {},
    };

    const std::vector<uint32_t> inputIndexes = {1};
    const std::vector<uint32_t> outputIndexes = {len * 2 + 1};
    const std::vector<hidl_memory> pools = {};

    return {
            .operands = operands,
            .operations = operations,
            .inputIndexes = inputIndexes,
            .outputIndexes = outputIndexes,
            .operandValues = operandValues,
            .pools = pools,
    };
}
193
194// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
195// This function assumes the operation is always ADD.
196std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
197 float outputValue = 1.0f + static_cast<float>(len);
198 return {{.operands = {
199 // Input
200 {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
201 // Output
202 {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
203};
204
Xusong Wang96e68dc2019-01-18 17:28:26 -0800205} // namespace
206
207// Tag for the compilation caching tests.
208class CompilationCachingTest : public NeuralnetworksHidlTest {
209 protected:
210 void SetUp() override {
211 NeuralnetworksHidlTest::SetUp();
Hervé Guihotac7ac522019-02-12 16:22:44 -0800212 ASSERT_NE(device.get(), nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800213
Xusong Wanged0822b2019-02-25 16:58:58 -0800214 // Create cache directory. The cache directory and a temporary cache file is always created
215 // to test the behavior of prepareModelFromCache, even when caching is not supported.
Xusong Wang96e68dc2019-01-18 17:28:26 -0800216 char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
217 char* cacheDir = mkdtemp(cacheDirTemp);
218 ASSERT_NE(cacheDir, nullptr);
Xusong Wang6824cc12019-02-12 18:00:37 -0800219 mCacheDir = cacheDir;
Xusong Wanged0822b2019-02-25 16:58:58 -0800220 mCacheDir.push_back('/');
Xusong Wang6824cc12019-02-12 18:00:37 -0800221
Xusong Wanged0822b2019-02-25 16:58:58 -0800222 Return<void> ret = device->getNumberOfCacheFilesNeeded(
223 [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
Xusong Wang96e68dc2019-01-18 17:28:26 -0800224 EXPECT_EQ(ErrorStatus::NONE, status);
Xusong Wanged0822b2019-02-25 16:58:58 -0800225 mNumModelCache = numModelCache;
226 mNumDataCache = numDataCache;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800227 });
228 EXPECT_TRUE(ret.isOk());
Xusong Wanged0822b2019-02-25 16:58:58 -0800229 mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0;
230
231 // Create empty cache files.
232 mTmpCache = mCacheDir + "tmp";
233 for (uint32_t i = 0; i < mNumModelCache; i++) {
234 mModelCache.push_back({mCacheDir + "model" + std::to_string(i)});
235 }
236 for (uint32_t i = 0; i < mNumDataCache; i++) {
237 mDataCache.push_back({mCacheDir + "data" + std::to_string(i)});
238 }
239 // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files.
240 hidl_vec<hidl_handle> modelHandle, dataHandle, tmpHandle;
241 createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle);
242 createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle);
243 createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle);
244
245 if (!mIsCachingSupported) {
Xusong Wang96e68dc2019-01-18 17:28:26 -0800246 LOG(INFO) << "NN VTS: Early termination of test because vendor service does not "
247 "support compilation caching.";
248 std::cout << "[ ] Early termination of test because vendor service does not "
249 "support compilation caching."
250 << std::endl;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800251 }
Xusong Wang6824cc12019-02-12 18:00:37 -0800252 }
Xusong Wang96e68dc2019-01-18 17:28:26 -0800253
Xusong Wang6824cc12019-02-12 18:00:37 -0800254 void TearDown() override {
Xusong Wang7cc0ccc2019-04-23 14:28:17 -0700255 // If the test passes, remove the tmp directory. Otherwise, keep it for debugging purposes.
256 if (!::testing::Test::HasFailure()) {
257 // Recursively remove the cache directory specified by mCacheDir.
258 auto callback = [](const char* entry, const struct stat*, int, struct FTW*) {
259 return remove(entry);
260 };
261 nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
Xusong Wang6824cc12019-02-12 18:00:37 -0800262 }
263 NeuralnetworksHidlTest::TearDown();
Xusong Wang96e68dc2019-01-18 17:28:26 -0800264 }
265
Xusong Wanged0822b2019-02-25 16:58:58 -0800266 void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
267 const hidl_vec<hidl_handle>& dataCache, bool* supported,
268 sp<IPreparedModel>* preparedModel = nullptr) {
269 if (preparedModel != nullptr) *preparedModel = nullptr;
270
271 // See if service can handle model.
272 bool fullySupportsModel = false;
273 Return<void> supportedCall = device->getSupportedOperations_1_2(
274 model,
275 [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
276 ASSERT_EQ(ErrorStatus::NONE, status);
277 ASSERT_EQ(supported.size(), model.operations.size());
278 fullySupportsModel = std::all_of(supported.begin(), supported.end(),
279 [](bool valid) { return valid; });
280 });
281 ASSERT_TRUE(supportedCall.isOk());
282 *supported = fullySupportsModel;
283 if (!fullySupportsModel) return;
284
285 // Launch prepare model.
286 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
287 ASSERT_NE(nullptr, preparedModelCallback.get());
Xusong Wang96e68dc2019-01-18 17:28:26 -0800288 hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
Xusong Wanged0822b2019-02-25 16:58:58 -0800289 Return<ErrorStatus> prepareLaunchStatus =
290 device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache,
291 dataCache, cacheToken, preparedModelCallback);
292 ASSERT_TRUE(prepareLaunchStatus.isOk());
293 ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
294
295 // Retrieve prepared model.
296 preparedModelCallback->wait();
297 ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
298 if (preparedModel != nullptr) {
299 *preparedModel =
300 V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
301 .withDefault(nullptr);
302 }
Xusong Wang96e68dc2019-01-18 17:28:26 -0800303 }
304
305 bool checkEarlyTermination(ErrorStatus status) {
306 if (status == ErrorStatus::GENERAL_FAILURE) {
307 LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
308 "save the prepared model that it does not support.";
309 std::cout << "[ ] Early termination of test because vendor service cannot "
310 "save the prepared model that it does not support."
311 << std::endl;
312 return true;
313 }
314 return false;
315 }
316
Xusong Wanged0822b2019-02-25 16:58:58 -0800317 bool checkEarlyTermination(bool supported) {
318 if (!supported) {
319 LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
320 "prepare model that it does not support.";
321 std::cout << "[ ] Early termination of test because vendor service cannot "
322 "prepare model that it does not support."
323 << std::endl;
324 return true;
325 }
326 return false;
327 }
328
329 void prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
330 const hidl_vec<hidl_handle>& dataCache,
Xusong Wang96e68dc2019-01-18 17:28:26 -0800331 sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
332 // Launch prepare model from cache.
333 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
334 ASSERT_NE(nullptr, preparedModelCallback.get());
335 hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
Xusong Wanged0822b2019-02-25 16:58:58 -0800336 Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache(
337 modelCache, dataCache, cacheToken, preparedModelCallback);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800338 ASSERT_TRUE(prepareLaunchStatus.isOk());
339 if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
340 *preparedModel = nullptr;
341 *status = static_cast<ErrorStatus>(prepareLaunchStatus);
342 return;
343 }
344
345 // Retrieve prepared model.
346 preparedModelCallback->wait();
347 *status = preparedModelCallback->getStatus();
348 *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
349 .withDefault(nullptr);
350 }
351
Xusong Wanged0822b2019-02-25 16:58:58 -0800352 // Absolute path to the temporary cache directory.
Xusong Wang6824cc12019-02-12 18:00:37 -0800353 std::string mCacheDir;
Xusong Wanged0822b2019-02-25 16:58:58 -0800354
355 // Groups of file paths for model and data cache in the tmp cache directory, initialized with
356 // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles
357 // and the inner vector is for fds held by each handle.
358 std::vector<std::vector<std::string>> mModelCache;
359 std::vector<std::vector<std::string>> mDataCache;
360
361 // A separate temporary file path in the tmp cache directory.
362 std::string mTmpCache;
363
Xusong Wang96e68dc2019-01-18 17:28:26 -0800364 uint8_t mToken[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
Xusong Wanged0822b2019-02-25 16:58:58 -0800365 uint32_t mNumModelCache;
366 uint32_t mNumDataCache;
367 uint32_t mIsCachingSupported;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800368};
369
// Basic round trip: compile with caching, reload from the cache, then execute the reloaded
// model and check results. Drivers without caching support must fail the reload with
// GENERAL_FAILURE.
TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
    // Create test HIDL model and compile.
    Model testModel = createTestModel();
    sp<IPreparedModel> preparedModel = nullptr;

    // Save the compilation to cache.
    {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(testModel, modelCache, dataCache, &supported);
        if (checkEarlyTermination(supported)) return;
    }

    // Retrieve preparedModel from cache.
    {
        preparedModel = nullptr;
        ErrorStatus status;
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (!mIsCachingSupported) {
            // Without caching support the driver must reject the request outright.
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else if (checkEarlyTermination(status)) {
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else {
            ASSERT_EQ(status, ErrorStatus::NONE);
            ASSERT_NE(preparedModel, nullptr);
        }
    }

    // Execute and verify results.
    generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
                                           testModel.relaxComputationFloat32toFloat16,
                                           /*testDynamicOutputShape=*/false);
}
411
// Same round trip as CacheSavingAndRetrieval, but the fds handed to the driver have non-empty
// content and/or a non-zero read offset. The driver must honor the current fd offset rather
// than assuming position 0.
TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
    // Create test HIDL model and compile.
    Model testModel = createTestModel();
    sp<IPreparedModel> preparedModel = nullptr;

    // Save the compilation to cache.
    {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        uint8_t dummyBytes[] = {0, 0};
        // Write a dummy integer to the cache.
        // The driver should be able to handle non-empty cache and non-zero fd offset.
        for (uint32_t i = 0; i < modelCache.size(); i++) {
            ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes,
                            sizeof(dummyBytes)),
                      sizeof(dummyBytes));
        }
        for (uint32_t i = 0; i < dataCache.size(); i++) {
            ASSERT_EQ(
                    write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
                    sizeof(dummyBytes));
        }
        saveModelToCache(testModel, modelCache, dataCache, &supported);
        if (checkEarlyTermination(supported)) return;
    }

    // Retrieve preparedModel from cache.
    {
        preparedModel = nullptr;
        ErrorStatus status;
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        uint8_t dummyByte = 0;
        // Advance the offset of each handle by one byte.
        // The driver should be able to handle non-zero fd offset.
        for (uint32_t i = 0; i < modelCache.size(); i++) {
            ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
        }
        for (uint32_t i = 0; i < dataCache.size(); i++) {
            ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
        }
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (!mIsCachingSupported) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else if (checkEarlyTermination(status)) {
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else {
            ASSERT_EQ(status, ErrorStatus::NONE);
            ASSERT_NE(preparedModel, nullptr);
        }
    }

    // Execute and verify results.
    generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
                                           testModel.relaxComputationFloat32toFloat16,
                                           /*testDynamicOutputShape=*/false);
}
475
// Passes the wrong number of model/data cache files (one too many, then one too few) to
// prepareModel_1_2. Saving must still produce a working prepared model (the driver is expected
// to ignore a bad cache request), and the subsequent prepareModelFromCache with the same
// mismatched handles must fail with INVALID_ARGUMENT or GENERAL_FAILURE.
TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) {
    // Create test HIDL model and compile.
    Model testModel = createTestModel();

    // Test with number of model cache files greater than mNumModelCache.
    {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        // Pass an additional cache file for model cache.
        mModelCache.push_back({mTmpCache});
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.pop_back();
        sp<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
        if (checkEarlyTermination(supported)) return;
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
                                               get_examples(),
                                               testModel.relaxComputationFloat32toFloat16,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::INVALID_ARGUMENT) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }

    // Test with number of model cache files smaller than mNumModelCache.
    if (mModelCache.size() > 0) {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        // Pop out the last cache file.
        auto tmp = mModelCache.back();
        mModelCache.pop_back();
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.push_back(tmp);
        sp<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
        if (checkEarlyTermination(supported)) return;
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
                                               get_examples(),
                                               testModel.relaxComputationFloat32toFloat16,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::INVALID_ARGUMENT) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }

    // Test with number of data cache files greater than mNumDataCache.
    {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        // Pass an additional cache file for data cache.
        mDataCache.push_back({mTmpCache});
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.pop_back();
        sp<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
        if (checkEarlyTermination(supported)) return;
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
                                               get_examples(),
                                               testModel.relaxComputationFloat32toFloat16,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::INVALID_ARGUMENT) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }

    // Test with number of data cache files smaller than mNumDataCache.
    if (mDataCache.size() > 0) {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        // Pop out the last cache file.
        auto tmp = mDataCache.back();
        mDataCache.pop_back();
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.push_back(tmp);
        sp<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
        if (checkEarlyTermination(supported)) return;
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
                                               get_examples(),
                                               testModel.relaxComputationFloat32toFloat16,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::INVALID_ARGUMENT) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }
}
594
// Saves a valid cache first, then calls prepareModelFromCache with the wrong number of
// model/data cache files (one too many, then one too few). Every mismatched request must
// fail with GENERAL_FAILURE or INVALID_ARGUMENT and yield no prepared model.
TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
    // Create test HIDL model and compile.
    Model testModel = createTestModel();

    // Save the compilation to cache.
    {
        bool supported;
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(testModel, modelCache, dataCache, &supported);
        if (checkEarlyTermination(supported)) return;
    }

    // Test with number of model cache files greater than mNumModelCache.
    {
        sp<IPreparedModel> preparedModel = nullptr;
        ErrorStatus status;
        hidl_vec<hidl_handle> modelCache, dataCache;
        mModelCache.push_back({mTmpCache});
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.pop_back();
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::GENERAL_FAILURE) {
            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }

    // Test with number of model cache files smaller than mNumModelCache.
    if (mModelCache.size() > 0) {
        sp<IPreparedModel> preparedModel = nullptr;
        ErrorStatus status;
        hidl_vec<hidl_handle> modelCache, dataCache;
        auto tmp = mModelCache.back();
        mModelCache.pop_back();
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.push_back(tmp);
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::GENERAL_FAILURE) {
            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }

    // Test with number of data cache files greater than mNumDataCache.
    {
        sp<IPreparedModel> preparedModel = nullptr;
        ErrorStatus status;
        hidl_vec<hidl_handle> modelCache, dataCache;
        mDataCache.push_back({mTmpCache});
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.pop_back();
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::GENERAL_FAILURE) {
            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }

    // Test with number of data cache files smaller than mNumDataCache.
    if (mDataCache.size() > 0) {
        sp<IPreparedModel> preparedModel = nullptr;
        ErrorStatus status;
        hidl_vec<hidl_handle> modelCache, dataCache;
        auto tmp = mDataCache.back();
        mDataCache.pop_back();
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.push_back(tmp);
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        if (status != ErrorStatus::GENERAL_FAILURE) {
            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
        }
        ASSERT_EQ(preparedModel, nullptr);
    }
}
675
Xusong Wang96e68dc2019-01-18 17:28:26 -0800676TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
677 // Create test HIDL model and compile.
678 Model testModel = createTestModel();
Xusong Wang96e68dc2019-01-18 17:28:26 -0800679
Xusong Wanged0822b2019-02-25 16:58:58 -0800680 // Go through each handle in model cache, test with NumFd greater than 1.
681 for (uint32_t i = 0; i < mNumModelCache; i++) {
682 bool supported;
683 hidl_vec<hidl_handle> modelCache, dataCache;
684 // Pass an invalid number of fds for handle i.
685 mModelCache[i].push_back(mTmpCache);
686 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
687 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
688 mModelCache[i].pop_back();
689 sp<IPreparedModel> preparedModel = nullptr;
690 saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
691 if (checkEarlyTermination(supported)) return;
692 ASSERT_NE(preparedModel, nullptr);
693 // Execute and verify results.
694 generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
695 get_examples(),
696 testModel.relaxComputationFloat32toFloat16,
697 /*testDynamicOutputShape=*/false);
698 // Check if prepareModelFromCache fails.
699 preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800700 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800701 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
702 if (status != ErrorStatus::INVALID_ARGUMENT) {
703 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800704 }
Xusong Wanged0822b2019-02-25 16:58:58 -0800705 ASSERT_EQ(preparedModel, nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800706 }
707
Xusong Wanged0822b2019-02-25 16:58:58 -0800708 // Go through each handle in model cache, test with NumFd equal to 0.
709 for (uint32_t i = 0; i < mNumModelCache; i++) {
710 bool supported;
711 hidl_vec<hidl_handle> modelCache, dataCache;
712 // Pass an invalid number of fds for handle i.
713 auto tmp = mModelCache[i].back();
714 mModelCache[i].pop_back();
715 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
716 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
717 mModelCache[i].push_back(tmp);
718 sp<IPreparedModel> preparedModel = nullptr;
719 saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
720 if (checkEarlyTermination(supported)) return;
721 ASSERT_NE(preparedModel, nullptr);
722 // Execute and verify results.
723 generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
724 get_examples(),
725 testModel.relaxComputationFloat32toFloat16,
726 /*testDynamicOutputShape=*/false);
727 // Check if prepareModelFromCache fails.
728 preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800729 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800730 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
731 if (status != ErrorStatus::INVALID_ARGUMENT) {
732 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800733 }
Xusong Wanged0822b2019-02-25 16:58:58 -0800734 ASSERT_EQ(preparedModel, nullptr);
735 }
736
737 // Go through each handle in data cache, test with NumFd greater than 1.
738 for (uint32_t i = 0; i < mNumDataCache; i++) {
739 bool supported;
740 hidl_vec<hidl_handle> modelCache, dataCache;
741 // Pass an invalid number of fds for handle i.
742 mDataCache[i].push_back(mTmpCache);
743 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
744 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
745 mDataCache[i].pop_back();
746 sp<IPreparedModel> preparedModel = nullptr;
747 saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
748 if (checkEarlyTermination(supported)) return;
749 ASSERT_NE(preparedModel, nullptr);
750 // Execute and verify results.
751 generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
752 get_examples(),
753 testModel.relaxComputationFloat32toFloat16,
754 /*testDynamicOutputShape=*/false);
755 // Check if prepareModelFromCache fails.
756 preparedModel = nullptr;
757 ErrorStatus status;
758 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
759 if (status != ErrorStatus::INVALID_ARGUMENT) {
760 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
761 }
762 ASSERT_EQ(preparedModel, nullptr);
763 }
764
765 // Go through each handle in data cache, test with NumFd equal to 0.
766 for (uint32_t i = 0; i < mNumDataCache; i++) {
767 bool supported;
768 hidl_vec<hidl_handle> modelCache, dataCache;
769 // Pass an invalid number of fds for handle i.
770 auto tmp = mDataCache[i].back();
771 mDataCache[i].pop_back();
772 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
773 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
774 mDataCache[i].push_back(tmp);
775 sp<IPreparedModel> preparedModel = nullptr;
776 saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
777 if (checkEarlyTermination(supported)) return;
778 ASSERT_NE(preparedModel, nullptr);
779 // Execute and verify results.
780 generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
781 get_examples(),
782 testModel.relaxComputationFloat32toFloat16,
783 /*testDynamicOutputShape=*/false);
784 // Check if prepareModelFromCache fails.
785 preparedModel = nullptr;
786 ErrorStatus status;
787 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
788 if (status != ErrorStatus::INVALID_ARGUMENT) {
789 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
790 }
791 ASSERT_EQ(preparedModel, nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800792 }
793}
794
795TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
796 // Create test HIDL model and compile.
797 Model testModel = createTestModel();
Xusong Wang96e68dc2019-01-18 17:28:26 -0800798
799 // Save the compilation to cache.
800 {
Xusong Wanged0822b2019-02-25 16:58:58 -0800801 bool supported;
802 hidl_vec<hidl_handle> modelCache, dataCache;
803 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
804 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
805 saveModelToCache(testModel, modelCache, dataCache, &supported);
806 if (checkEarlyTermination(supported)) return;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800807 }
808
Xusong Wanged0822b2019-02-25 16:58:58 -0800809 // Go through each handle in model cache, test with NumFd greater than 1.
810 for (uint32_t i = 0; i < mNumModelCache; i++) {
811 sp<IPreparedModel> preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800812 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800813 hidl_vec<hidl_handle> modelCache, dataCache;
814 mModelCache[i].push_back(mTmpCache);
815 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
816 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
817 mModelCache[i].pop_back();
818 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800819 if (status != ErrorStatus::GENERAL_FAILURE) {
820 ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800821 }
Xusong Wanged0822b2019-02-25 16:58:58 -0800822 ASSERT_EQ(preparedModel, nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800823 }
824
Xusong Wanged0822b2019-02-25 16:58:58 -0800825 // Go through each handle in model cache, test with NumFd equal to 0.
826 for (uint32_t i = 0; i < mNumModelCache; i++) {
827 sp<IPreparedModel> preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800828 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800829 hidl_vec<hidl_handle> modelCache, dataCache;
830 auto tmp = mModelCache[i].back();
831 mModelCache[i].pop_back();
832 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
833 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
834 mModelCache[i].push_back(tmp);
835 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800836 if (status != ErrorStatus::GENERAL_FAILURE) {
837 ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800838 }
Xusong Wanged0822b2019-02-25 16:58:58 -0800839 ASSERT_EQ(preparedModel, nullptr);
840 }
841
842 // Go through each handle in data cache, test with NumFd greater than 1.
843 for (uint32_t i = 0; i < mNumDataCache; i++) {
844 sp<IPreparedModel> preparedModel = nullptr;
845 ErrorStatus status;
846 hidl_vec<hidl_handle> modelCache, dataCache;
847 mDataCache[i].push_back(mTmpCache);
848 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
849 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
850 mDataCache[i].pop_back();
851 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
852 if (status != ErrorStatus::GENERAL_FAILURE) {
853 ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
854 }
855 ASSERT_EQ(preparedModel, nullptr);
856 }
857
858 // Go through each handle in data cache, test with NumFd equal to 0.
859 for (uint32_t i = 0; i < mNumDataCache; i++) {
860 sp<IPreparedModel> preparedModel = nullptr;
861 ErrorStatus status;
862 hidl_vec<hidl_handle> modelCache, dataCache;
863 auto tmp = mDataCache[i].back();
864 mDataCache[i].pop_back();
865 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
866 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
867 mDataCache[i].push_back(tmp);
868 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
869 if (status != ErrorStatus::GENERAL_FAILURE) {
870 ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
871 }
872 ASSERT_EQ(preparedModel, nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800873 }
874}
875
876TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
877 // Create test HIDL model and compile.
878 Model testModel = createTestModel();
Xusong Wanged0822b2019-02-25 16:58:58 -0800879 std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
880 std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800881
Xusong Wanged0822b2019-02-25 16:58:58 -0800882 // Go through each handle in model cache, test with invalid access mode.
883 for (uint32_t i = 0; i < mNumModelCache; i++) {
884 bool supported;
885 hidl_vec<hidl_handle> modelCache, dataCache;
886 modelCacheMode[i] = AccessMode::READ_ONLY;
887 createCacheHandles(mModelCache, modelCacheMode, &modelCache);
888 createCacheHandles(mDataCache, dataCacheMode, &dataCache);
889 modelCacheMode[i] = AccessMode::READ_WRITE;
890 sp<IPreparedModel> preparedModel = nullptr;
891 saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
892 if (checkEarlyTermination(supported)) return;
893 ASSERT_NE(preparedModel, nullptr);
894 // Execute and verify results.
895 generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
896 get_examples(),
897 testModel.relaxComputationFloat32toFloat16,
898 /*testDynamicOutputShape=*/false);
899 // Check if prepareModelFromCache fails.
900 preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800901 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800902 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
903 if (status != ErrorStatus::INVALID_ARGUMENT) {
904 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
905 }
906 ASSERT_EQ(preparedModel, nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800907 }
908
Xusong Wanged0822b2019-02-25 16:58:58 -0800909 // Go through each handle in data cache, test with invalid access mode.
910 for (uint32_t i = 0; i < mNumDataCache; i++) {
911 bool supported;
912 hidl_vec<hidl_handle> modelCache, dataCache;
913 dataCacheMode[i] = AccessMode::READ_ONLY;
914 createCacheHandles(mModelCache, modelCacheMode, &modelCache);
915 createCacheHandles(mDataCache, dataCacheMode, &dataCache);
916 dataCacheMode[i] = AccessMode::READ_WRITE;
917 sp<IPreparedModel> preparedModel = nullptr;
918 saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
919 if (checkEarlyTermination(supported)) return;
920 ASSERT_NE(preparedModel, nullptr);
921 // Execute and verify results.
922 generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
923 get_examples(),
924 testModel.relaxComputationFloat32toFloat16,
925 /*testDynamicOutputShape=*/false);
926 // Check if prepareModelFromCache fails.
927 preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800928 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800929 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
930 if (status != ErrorStatus::INVALID_ARGUMENT) {
931 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
932 }
933 ASSERT_EQ(preparedModel, nullptr);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800934 }
935}
936
937TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
938 // Create test HIDL model and compile.
939 Model testModel = createTestModel();
Xusong Wanged0822b2019-02-25 16:58:58 -0800940 std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
941 std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800942
943 // Save the compilation to cache.
944 {
Xusong Wanged0822b2019-02-25 16:58:58 -0800945 bool supported;
946 hidl_vec<hidl_handle> modelCache, dataCache;
947 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
948 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
949 saveModelToCache(testModel, modelCache, dataCache, &supported);
950 if (checkEarlyTermination(supported)) return;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800951 }
952
Xusong Wanged0822b2019-02-25 16:58:58 -0800953 // Go through each handle in model cache, test with invalid access mode.
954 for (uint32_t i = 0; i < mNumModelCache; i++) {
955 sp<IPreparedModel> preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800956 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800957 hidl_vec<hidl_handle> modelCache, dataCache;
958 modelCacheMode[i] = AccessMode::WRITE_ONLY;
959 createCacheHandles(mModelCache, modelCacheMode, &modelCache);
960 createCacheHandles(mDataCache, dataCacheMode, &dataCache);
961 modelCacheMode[i] = AccessMode::READ_WRITE;
962 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800963 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
964 ASSERT_EQ(preparedModel, nullptr);
965 }
966
Xusong Wanged0822b2019-02-25 16:58:58 -0800967 // Go through each handle in data cache, test with invalid access mode.
968 for (uint32_t i = 0; i < mNumDataCache; i++) {
969 sp<IPreparedModel> preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -0800970 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -0800971 hidl_vec<hidl_handle> modelCache, dataCache;
972 dataCacheMode[i] = AccessMode::WRITE_ONLY;
973 createCacheHandles(mModelCache, modelCacheMode, &modelCache);
974 createCacheHandles(mDataCache, dataCacheMode, &dataCache);
975 dataCacheMode[i] = AccessMode::READ_WRITE;
976 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
Xusong Wang96e68dc2019-01-18 17:28:26 -0800977 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
978 ASSERT_EQ(preparedModel, nullptr);
979 }
980}
981
Xusong Wang7cc0ccc2019-04-23 14:28:17 -0700982// Copy file contents between file groups.
983// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
984// The outer vector sizes must match and the inner vectors must have size = 1.
985static void copyCacheFiles(const std::vector<std::vector<std::string>>& from,
986 const std::vector<std::vector<std::string>>& to) {
987 constexpr size_t kBufferSize = 1000000;
988 uint8_t buffer[kBufferSize];
989
990 ASSERT_EQ(from.size(), to.size());
991 for (uint32_t i = 0; i < from.size(); i++) {
992 ASSERT_EQ(from[i].size(), 1u);
993 ASSERT_EQ(to[i].size(), 1u);
994 int fromFd = open(from[i][0].c_str(), O_RDONLY);
995 int toFd = open(to[i][0].c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
996 ASSERT_GE(fromFd, 0);
997 ASSERT_GE(toFd, 0);
998
999 ssize_t readBytes;
1000 while ((readBytes = read(fromFd, &buffer, kBufferSize)) > 0) {
1001 ASSERT_EQ(write(toFd, &buffer, readBytes), readBytes);
1002 }
1003 ASSERT_GE(readBytes, 0);
1004
1005 close(fromFd);
1006 close(toFd);
1007 }
1008}
1009
// Number of operations in the large test model.
constexpr uint32_t kLargeModelSize = 100;
// Number of iterations for the probabilistic TOCTOU tests below; more
// iterations increase the chance of actually hitting the race window.
constexpr uint32_t kNumIterationsTOCTOU = 100;
1013
// TOCTOU (time-of-check/time-of-use) test: while the driver is saving the ADD
// model's compilation to the cache files, a second thread concurrently
// overwrites those same files with the MUL model's cache contents. The driver
// must not crash, and any model it does manage to prepare from the resulting
// cache must still compute the correct results.
TEST_F(CompilationCachingTest, SaveToCache_TOCTOU) {
    if (!mIsCachingSupported) return;

    // Save the testModelMul compilation to cache.
    // The "_mul" suffix keeps the MUL cache in a separate set of files so it can
    // later be copied over the ADD cache while the ADD save is in flight.
    Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
    auto modelCacheMul = mModelCache;
    for (auto& cache : modelCacheMul) {
        cache[0].append("_mul");
    }
    {
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        bool supported;
        saveModelToCache(testModelMul, modelCache, dataCache, &supported);
        if (checkEarlyTermination(supported)) return;
    }

    // Use a different token for testModelAdd.
    mToken[0]++;

    // This test is probabilistic, so we run it multiple times.
    Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
        // Save the testModelAdd compilation to cache.
        {
            bool supported;
            hidl_vec<hidl_handle> modelCache, dataCache;
            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);

            // Spawn a thread to copy the cache content concurrently while saving to cache.
            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
            saveModelToCache(testModelAdd, modelCache, dataCache, &supported);
            thread.join();
            if (checkEarlyTermination(supported)) return;
        }

        // Retrieve preparedModel from cache.
        {
            sp<IPreparedModel> preparedModel = nullptr;
            ErrorStatus status;
            hidl_vec<hidl_handle> modelCache, dataCache;
            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);

            // The preparation may fail or succeed, but must not crash. If the preparation succeeds,
            // the prepared model must be executed with the correct result and not crash.
            if (status != ErrorStatus::NONE) {
                ASSERT_EQ(preparedModel, nullptr);
            } else {
                ASSERT_NE(preparedModel, nullptr);
                generated_tests::EvaluatePreparedModel(
                        preparedModel, [](int) { return false; },
                        getLargeModelExamples(kLargeModelSize),
                        testModelAdd.relaxComputationFloat32toFloat16,
                        /*testDynamicOutputShape=*/false);
            }
        }
    }
}
1076
// TOCTOU (time-of-check/time-of-use) test: while the driver is preparing a
// model from the cache files, a second thread concurrently overwrites those
// same files with a different (MUL) model's cache contents. The driver must
// not crash, and any model it does manage to prepare must still compute the
// correct results.
TEST_F(CompilationCachingTest, PrepareFromCache_TOCTOU) {
    if (!mIsCachingSupported) return;

    // Save the testModelMul compilation to cache.
    // The "_mul" suffix keeps the MUL cache in a separate set of files so it can
    // later be copied over the ADD cache while prepareModelFromCache is in flight.
    Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
    auto modelCacheMul = mModelCache;
    for (auto& cache : modelCacheMul) {
        cache[0].append("_mul");
    }
    {
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        bool supported;
        saveModelToCache(testModelMul, modelCache, dataCache, &supported);
        if (checkEarlyTermination(supported)) return;
    }

    // Use a different token for testModelAdd.
    mToken[0]++;

    // This test is probabilistic, so we run it multiple times.
    Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
        // Save the testModelAdd compilation to cache.
        {
            bool supported;
            hidl_vec<hidl_handle> modelCache, dataCache;
            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
            saveModelToCache(testModelAdd, modelCache, dataCache, &supported);
            if (checkEarlyTermination(supported)) return;
        }

        // Retrieve preparedModel from cache.
        {
            sp<IPreparedModel> preparedModel = nullptr;
            ErrorStatus status;
            hidl_vec<hidl_handle> modelCache, dataCache;
            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);

            // Spawn a thread to copy the cache content concurrently while preparing from cache.
            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
            thread.join();

            // The preparation may fail or succeed, but must not crash. If the preparation succeeds,
            // the prepared model must be executed with the correct result and not crash.
            if (status != ErrorStatus::NONE) {
                ASSERT_EQ(preparedModel, nullptr);
            } else {
                ASSERT_NE(preparedModel, nullptr);
                generated_tests::EvaluatePreparedModel(
                        preparedModel, [](int) { return false; },
                        getLargeModelExamples(kLargeModelSize),
                        testModelAdd.relaxComputationFloat32toFloat16,
                        /*testDynamicOutputShape=*/false);
            }
        }
    }
}
1139
1140TEST_F(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
1141 if (!mIsCachingSupported) return;
1142
1143 // Save the testModelMul compilation to cache.
1144 Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
1145 auto modelCacheMul = mModelCache;
1146 for (auto& cache : modelCacheMul) {
1147 cache[0].append("_mul");
1148 }
1149 {
1150 hidl_vec<hidl_handle> modelCache, dataCache;
1151 createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
1152 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1153 bool supported;
1154 saveModelToCache(testModelMul, modelCache, dataCache, &supported);
1155 if (checkEarlyTermination(supported)) return;
1156 }
1157
1158 // Use a different token for testModelAdd.
1159 mToken[0]++;
1160
1161 // Save the testModelAdd compilation to cache.
1162 Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
1163 {
1164 bool supported;
1165 hidl_vec<hidl_handle> modelCache, dataCache;
1166 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1167 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1168 saveModelToCache(testModelAdd, modelCache, dataCache, &supported);
1169 if (checkEarlyTermination(supported)) return;
1170 }
1171
1172 // Replace the model cache of testModelAdd with testModelMul.
1173 copyCacheFiles(modelCacheMul, mModelCache);
1174
1175 // Retrieve the preparedModel from cache, expect failure.
1176 {
1177 sp<IPreparedModel> preparedModel = nullptr;
1178 ErrorStatus status;
1179 hidl_vec<hidl_handle> modelCache, dataCache;
1180 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1181 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1182 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
1183 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
1184 ASSERT_EQ(preparedModel, nullptr);
1185 }
1186}
1187
Xusong Wang96e68dc2019-01-18 17:28:26 -08001188class CompilationCachingSecurityTest : public CompilationCachingTest,
1189 public ::testing::WithParamInterface<uint32_t> {
1190 protected:
1191 void SetUp() {
1192 CompilationCachingTest::SetUp();
1193 generator.seed(kSeed);
1194 }
1195
1196 // Get a random integer within a closed range [lower, upper].
1197 template <typename T>
1198 T getRandomInt(T lower, T upper) {
1199 std::uniform_int_distribution<T> dis(lower, upper);
1200 return dis(generator);
1201 }
1202
1203 const uint32_t kSeed = GetParam();
1204 std::mt19937 generator;
1205};
1206
1207TEST_P(CompilationCachingSecurityTest, CorruptedSecuritySensitiveCache) {
1208 if (!mIsCachingSupported) return;
1209
1210 // Create test HIDL model and compile.
1211 Model testModel = createTestModel();
Xusong Wang96e68dc2019-01-18 17:28:26 -08001212
Xusong Wanged0822b2019-02-25 16:58:58 -08001213 for (uint32_t i = 0; i < mNumModelCache; i++) {
1214 // Save the compilation to cache.
1215 {
1216 bool supported;
1217 hidl_vec<hidl_handle> modelCache, dataCache;
1218 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1219 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1220 saveModelToCache(testModel, modelCache, dataCache, &supported);
1221 if (checkEarlyTermination(supported)) return;
1222 }
Xusong Wang96e68dc2019-01-18 17:28:26 -08001223
Xusong Wanged0822b2019-02-25 16:58:58 -08001224 // Randomly flip one single bit of the cache entry.
1225 FILE* pFile = fopen(mModelCache[i][0].c_str(), "r+");
1226 ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
1227 long int fileSize = ftell(pFile);
1228 if (fileSize == 0) {
1229 fclose(pFile);
1230 continue;
1231 }
1232 ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
1233 int readByte = fgetc(pFile);
1234 ASSERT_NE(readByte, EOF);
1235 ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
1236 ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
1237 fclose(pFile);
Xusong Wang96e68dc2019-01-18 17:28:26 -08001238
Xusong Wanged0822b2019-02-25 16:58:58 -08001239 // Retrieve preparedModel from cache, expect failure.
1240 {
1241 sp<IPreparedModel> preparedModel = nullptr;
1242 ErrorStatus status;
1243 hidl_vec<hidl_handle> modelCache, dataCache;
1244 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1245 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1246 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
1247 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
1248 ASSERT_EQ(preparedModel, nullptr);
1249 }
Xusong Wang96e68dc2019-01-18 17:28:26 -08001250 }
1251}
1252
1253TEST_P(CompilationCachingSecurityTest, WrongLengthSecuritySensitiveCache) {
1254 if (!mIsCachingSupported) return;
1255
1256 // Create test HIDL model and compile.
1257 Model testModel = createTestModel();
Xusong Wang96e68dc2019-01-18 17:28:26 -08001258
Xusong Wanged0822b2019-02-25 16:58:58 -08001259 for (uint32_t i = 0; i < mNumModelCache; i++) {
1260 // Save the compilation to cache.
1261 {
1262 bool supported;
1263 hidl_vec<hidl_handle> modelCache, dataCache;
1264 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1265 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1266 saveModelToCache(testModel, modelCache, dataCache, &supported);
1267 if (checkEarlyTermination(supported)) return;
1268 }
Xusong Wang96e68dc2019-01-18 17:28:26 -08001269
Xusong Wanged0822b2019-02-25 16:58:58 -08001270 // Randomly append bytes to the cache entry.
1271 FILE* pFile = fopen(mModelCache[i][0].c_str(), "a");
1272 uint32_t appendLength = getRandomInt(1, 256);
1273 for (uint32_t i = 0; i < appendLength; i++) {
1274 ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
1275 }
1276 fclose(pFile);
Xusong Wang96e68dc2019-01-18 17:28:26 -08001277
Xusong Wanged0822b2019-02-25 16:58:58 -08001278 // Retrieve preparedModel from cache, expect failure.
1279 {
1280 sp<IPreparedModel> preparedModel = nullptr;
1281 ErrorStatus status;
1282 hidl_vec<hidl_handle> modelCache, dataCache;
1283 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1284 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1285 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
1286 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
1287 ASSERT_EQ(preparedModel, nullptr);
1288 }
Xusong Wang96e68dc2019-01-18 17:28:26 -08001289 }
1290}
1291
1292TEST_P(CompilationCachingSecurityTest, WrongToken) {
1293 if (!mIsCachingSupported) return;
1294
1295 // Create test HIDL model and compile.
1296 Model testModel = createTestModel();
Xusong Wang96e68dc2019-01-18 17:28:26 -08001297
1298 // Save the compilation to cache.
1299 {
Xusong Wanged0822b2019-02-25 16:58:58 -08001300 bool supported;
1301 hidl_vec<hidl_handle> modelCache, dataCache;
1302 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1303 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1304 saveModelToCache(testModel, modelCache, dataCache, &supported);
1305 if (checkEarlyTermination(supported)) return;
Xusong Wang96e68dc2019-01-18 17:28:26 -08001306 }
1307
1308 // Randomly flip one single bit in mToken.
1309 uint32_t ind = getRandomInt(0u, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
1310 mToken[ind] ^= (1U << getRandomInt(0, 7));
1311
1312 // Retrieve the preparedModel from cache, expect failure.
1313 {
Xusong Wanged0822b2019-02-25 16:58:58 -08001314 sp<IPreparedModel> preparedModel = nullptr;
Xusong Wang96e68dc2019-01-18 17:28:26 -08001315 ErrorStatus status;
Xusong Wanged0822b2019-02-25 16:58:58 -08001316 hidl_vec<hidl_handle> modelCache, dataCache;
1317 createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
1318 createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
1319 prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
Xusong Wang96e68dc2019-01-18 17:28:26 -08001320 ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
1321 ASSERT_EQ(preparedModel, nullptr);
1322 }
1323}
1324
// Instantiate the security tests with RNG seeds 0..9 for varied random coverage.
INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
                        ::testing::Range(0U, 10U));
1327
1328} // namespace functional
1329} // namespace vts
1330} // namespace V1_2
1331} // namespace neuralnetworks
1332} // namespace hardware
1333} // namespace android