/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_aidl_hal_test"

#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/binder_interface_utils.h>
#include <android/binder_status.h>
#include <fcntl.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <hidlmemory/mapping.h>
#include <unistd.h>

#include <cstdio>
#include <cstdlib>
#include <iterator>
#include <random>
#include <thread>

#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

// Forward declarations of the mobilenet generated test models in
// frameworks/ml/nn/runtime/test/generated/.
namespace generated_tests::mobilenet_224_gender_basic_fixed {
const test_helper::TestModel& get_test_model();
}  // namespace generated_tests::mobilenet_224_gender_basic_fixed

namespace generated_tests::mobilenet_quantized {
const test_helper::TestModel& get_test_model();
}  // namespace generated_tests::mobilenet_quantized

namespace aidl::android::hardware::neuralnetworks::vts::functional {

using namespace test_helper;
using implementation::PreparedModelCallback;

namespace float32_model {

constexpr auto get_test_model = generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;

}  // namespace float32_model

namespace quant8_model {

constexpr auto get_test_model = generated_tests::mobilenet_quantized::get_test_model;

}  // namespace quant8_model

namespace {

enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };

// Creates cache file descriptors from the provided file paths. Each file is
// opened with the access mode at the corresponding index, and the resulting
// fds are returned in the same order as the paths.
void createCacheFds(const std::vector<std::string>& files, const std::vector<AccessMode>& mode,
                    std::vector<ndk::ScopedFileDescriptor>* fds) {
    fds->clear();
    fds->reserve(files.size());
    for (uint32_t i = 0; i < files.size(); i++) {
        const auto& file = files[i];
        int fd;
        if (mode[i] == AccessMode::READ_ONLY) {
            fd = open(file.c_str(), O_RDONLY);
        } else if (mode[i] == AccessMode::WRITE_ONLY) {
            fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
        } else if (mode[i] == AccessMode::READ_WRITE) {
            fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
        } else {
            FAIL();
        }
        ASSERT_GE(fd, 0);
        fds->emplace_back(fd);
    }
}

void createCacheFds(const std::vector<std::string>& files, AccessMode mode,
                    std::vector<ndk::ScopedFileDescriptor>* fds) {
    createCacheFds(files, std::vector<AccessMode>(files.size(), mode), fds);
}
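
// Usage sketch (hypothetical path): the tests below open the same files for
// writing when saving a compilation and for reading when loading it back,
// mirroring a save-then-load round trip:
//   std::vector<ndk::ScopedFileDescriptor> fds;
//   createCacheFds({"/data/local/tmp/cache/model0"}, AccessMode::WRITE_ONLY, &fds);
//   // ... saveModelToCache(...) writes through these fds ...
//   createCacheFds({"/data/local/tmp/cache/model0"}, AccessMode::READ_ONLY, &fds);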

// Creates a chain of broadcast operations. The second operand of each operation is a constant
// tensor with the value [1]. For simplicity, the activation scalar is shared across all
// operations. The second operand is deliberately not shared in the model, so that the driver has
// to maintain a non-trivial amount of constant data and the corresponding data locations in the
// cache.
//
//                ---------  activation  ---------
//                ↓      ↓      ↓             ↓
// E.g. input -> ADD -> ADD -> ADD -> ... -> ADD -> output
//                ↑      ↑      ↑             ↑
//               [1]    [1]    [1]           [1]
//
// This function assumes the operation is either ADD or MUL.
template <typename CppType, TestOperandType operandType>
TestModel createLargeTestModelImpl(TestOperationType op, uint32_t len) {
    EXPECT_TRUE(op == TestOperationType::ADD || op == TestOperationType::MUL);

    // Model operations and operands.
    std::vector<TestOperation> operations(len);
    std::vector<TestOperand> operands(len * 2 + 2);

    // The activation scalar, value = 0.
    operands[0] = {
            .type = TestOperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = len,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = TestOperandLifeTime::CONSTANT_COPY,
            .data = TestBuffer::createFromVector<int32_t>({0}),
    };

    // The buffer value of the constant second operand. The logical value is always 1.0f.
    CppType bufferValue;
    // The scale of the first and second operand.
    float scale1, scale2;
    if (operandType == TestOperandType::TENSOR_FLOAT32) {
        bufferValue = 1.0f;
        scale1 = 0.0f;
        scale2 = 0.0f;
    } else if (op == TestOperationType::ADD) {
        bufferValue = 1;
        scale1 = 1.0f;
        scale2 = 1.0f;
    } else {
        // To satisfy the constraint on quant8 MUL: input0.scale * input1.scale < output.scale,
        // set input1 to have scale = 0.5f and bufferValue = 2, i.e. 1.0f in floating point.
        bufferValue = 2;
        scale1 = 1.0f;
        scale2 = 0.5f;
    }

    for (uint32_t i = 0; i < len; i++) {
        const uint32_t firstInputIndex = i * 2 + 1;
        const uint32_t secondInputIndex = firstInputIndex + 1;
        const uint32_t outputIndex = secondInputIndex + 1;

        // The first operation input.
        operands[firstInputIndex] = {
                .type = operandType,
                .dimensions = {1},
                .numberOfConsumers = 1,
                .scale = scale1,
                .zeroPoint = 0,
                .lifetime = (i == 0 ? TestOperandLifeTime::MODEL_INPUT
                                    : TestOperandLifeTime::TEMPORARY_VARIABLE),
                .data = (i == 0 ? TestBuffer::createFromVector<CppType>({1}) : TestBuffer()),
        };

        // The second operation input, value = 1.
        operands[secondInputIndex] = {
                .type = operandType,
                .dimensions = {1},
                .numberOfConsumers = 1,
                .scale = scale2,
                .zeroPoint = 0,
                .lifetime = TestOperandLifeTime::CONSTANT_COPY,
                .data = TestBuffer::createFromVector<CppType>({bufferValue}),
        };

        // The operation. All operations share the same activation scalar.
        // The output operand is created as an input in the next iteration of the loop, in the case
        // of all but the last member of the chain; and after the loop as a model output, in the
        // case of the last member of the chain.
        operations[i] = {
                .type = op,
                .inputs = {firstInputIndex, secondInputIndex, /*activation scalar*/ 0},
                .outputs = {outputIndex},
        };
    }

    // For TestOperationType::ADD, output = 1 + 1 * len = len + 1
    // For TestOperationType::MUL, output = 1 * 1 ^ len = 1
    CppType outputResult = static_cast<CppType>(op == TestOperationType::ADD ? len + 1u : 1u);

    // The model output.
    operands.back() = {
            .type = operandType,
            .dimensions = {1},
            .numberOfConsumers = 0,
            .scale = scale1,
            .zeroPoint = 0,
            .lifetime = TestOperandLifeTime::MODEL_OUTPUT,
            .data = TestBuffer::createFromVector<CppType>({outputResult}),
    };

    return {
            .main = {.operands = std::move(operands),
                     .operations = std::move(operations),
                     .inputIndexes = {1},
                     .outputIndexes = {len * 2 + 1}},
            .isRelaxed = false,
    };
}
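
// Worked example (illustrative, following the output formulas above):
//   createLargeTestModelImpl<float, TestOperandType::TENSOR_FLOAT32>(
//           TestOperationType::ADD, 10)
// builds a chain of 10 ADDs, so feeding the model input value 1.0f yields
// 1.0f + 10 * 1.0f = 11.0f at the output. The MUL variant multiplies by 1.0f
// at every step and therefore yields 1.0f regardless of len.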

}  // namespace

// Tag for the compilation caching tests.
class CompilationCachingTestBase : public testing::Test {
  protected:
    CompilationCachingTestBase(std::shared_ptr<IDevice> device, OperandType type)
        : kDevice(std::move(device)), kOperandType(type) {}

    void SetUp() override {
        testing::Test::SetUp();
        ASSERT_NE(kDevice.get(), nullptr);

        // Create the cache directory. The cache directory and a temporary cache file are always
        // created to test the behavior of prepareModelFromCache, even when caching is not
        // supported.
        char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
        char* cacheDir = mkdtemp(cacheDirTemp);
        ASSERT_NE(cacheDir, nullptr);
        mCacheDir = cacheDir;
        mCacheDir.push_back('/');

        NumberOfCacheFiles numCacheFiles;
        const auto ret = kDevice->getNumberOfCacheFilesNeeded(&numCacheFiles);
        ASSERT_TRUE(ret.isOk());

        mNumModelCache = numCacheFiles.numModelCache;
        mNumDataCache = numCacheFiles.numDataCache;
        ASSERT_GE(mNumModelCache, 0) << "Invalid numModelCache: " << mNumModelCache;
        ASSERT_GE(mNumDataCache, 0) << "Invalid numDataCache: " << mNumDataCache;
        mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0;

        // Create empty cache files.
        mTmpCache = mCacheDir + "tmp";
        for (uint32_t i = 0; i < mNumModelCache; i++) {
            mModelCache.push_back({mCacheDir + "model" + std::to_string(i)});
        }
        for (uint32_t i = 0; i < mNumDataCache; i++) {
            mDataCache.push_back({mCacheDir + "data" + std::to_string(i)});
        }
        // Placeholder handles, use AccessMode::WRITE_ONLY for createCacheFds to create files.
        std::vector<ndk::ScopedFileDescriptor> modelHandle, dataHandle, tmpHandle;
        createCacheFds(mModelCache, AccessMode::WRITE_ONLY, &modelHandle);
        createCacheFds(mDataCache, AccessMode::WRITE_ONLY, &dataHandle);
        createCacheFds({mTmpCache}, AccessMode::WRITE_ONLY, &tmpHandle);

        if (!mIsCachingSupported) {
            LOG(INFO) << "NN VTS: Early termination of test because vendor service does not "
                         "support compilation caching.";
            std::cout << "[          ] Early termination of test because vendor service does not "
                         "support compilation caching."
                      << std::endl;
        }
    }

    void TearDown() override {
        // If the test passes, remove the tmp directory. Otherwise, keep it for debugging purposes.
        if (!testing::Test::HasFailure()) {
            // Recursively remove the cache directory specified by mCacheDir.
            auto callback = [](const char* entry, const struct stat*, int, struct FTW*) {
                return remove(entry);
            };
            nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
        }
        testing::Test::TearDown();
    }

    // Model and examples creators. According to kOperandType, the following methods will return
    // either float32 model/examples or the quant8 variant.
    TestModel createTestModel() {
        if (kOperandType == OperandType::TENSOR_FLOAT32) {
            return float32_model::get_test_model();
        } else {
            return quant8_model::get_test_model();
        }
    }

    TestModel createLargeTestModel(OperationType op, uint32_t len) {
        if (kOperandType == OperandType::TENSOR_FLOAT32) {
            return createLargeTestModelImpl<float, TestOperandType::TENSOR_FLOAT32>(
                    static_cast<TestOperationType>(op), len);
        } else {
            return createLargeTestModelImpl<uint8_t, TestOperandType::TENSOR_QUANT8_ASYMM>(
                    static_cast<TestOperationType>(op), len);
        }
    }

    // See if the service can handle the model.
    bool isModelFullySupported(const Model& model) {
        std::vector<bool> supportedOps;
        const auto supportedCall = kDevice->getSupportedOperations(model, &supportedOps);
        EXPECT_TRUE(supportedCall.isOk());
        EXPECT_EQ(supportedOps.size(), model.main.operations.size());
        if (!supportedCall.isOk() || supportedOps.size() != model.main.operations.size()) {
            return false;
        }
        return std::all_of(supportedOps.begin(), supportedOps.end(),
                           [](bool valid) { return valid; });
    }

    void saveModelToCache(const Model& model,
                          const std::vector<ndk::ScopedFileDescriptor>& modelCache,
                          const std::vector<ndk::ScopedFileDescriptor>& dataCache,
                          std::shared_ptr<IPreparedModel>* preparedModel = nullptr) {
        if (preparedModel != nullptr) *preparedModel = nullptr;

        // Launch prepare model.
        std::shared_ptr<PreparedModelCallback> preparedModelCallback =
                ndk::SharedRefBase::make<PreparedModelCallback>();
        std::vector<uint8_t> cacheToken(std::begin(mToken), std::end(mToken));
        const auto prepareLaunchStatus = kDevice->prepareModel(
                model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline,
                modelCache, dataCache, cacheToken, preparedModelCallback);
        ASSERT_TRUE(prepareLaunchStatus.isOk());

        // Retrieve prepared model.
        preparedModelCallback->wait();
        ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
        if (preparedModel != nullptr) {
            *preparedModel = preparedModelCallback->getPreparedModel();
        }
    }

    bool checkEarlyTermination(ErrorStatus status) {
        if (status == ErrorStatus::GENERAL_FAILURE) {
            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                         "save a prepared model that it does not support.";
            std::cout << "[          ] Early termination of test because vendor service cannot "
                         "save a prepared model that it does not support."
                      << std::endl;
            return true;
        }
        return false;
    }

    bool checkEarlyTermination(const Model& model) {
        if (!isModelFullySupported(model)) {
            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                         "prepare a model that it does not support.";
            std::cout << "[          ] Early termination of test because vendor service cannot "
                         "prepare a model that it does not support."
                      << std::endl;
            return true;
        }
        return false;
    }

    // If fallbackModel is not provided, call prepareModelFromCache.
    // If fallbackModel is provided, and prepareModelFromCache returns GENERAL_FAILURE,
    // then prepareModel(fallbackModel) will be called.
    // This replicates the behaviour of the runtime when loading a model from cache.
    // The NNAPI shim depends on this behaviour and may try to load the model from cache in
    // prepareModel (the shim needs model information when loading from cache).
    void prepareModelFromCache(const std::vector<ndk::ScopedFileDescriptor>& modelCache,
                               const std::vector<ndk::ScopedFileDescriptor>& dataCache,
                               std::shared_ptr<IPreparedModel>* preparedModel, ErrorStatus* status,
                               const Model* fallbackModel = nullptr) {
        // Launch prepare model from cache.
        std::shared_ptr<PreparedModelCallback> preparedModelCallback =
                ndk::SharedRefBase::make<PreparedModelCallback>();
        std::vector<uint8_t> cacheToken(std::begin(mToken), std::end(mToken));
        auto prepareLaunchStatus = kDevice->prepareModelFromCache(
                kNoDeadline, modelCache, dataCache, cacheToken, preparedModelCallback);

        // The shim does not support prepareModelFromCache() properly, but it
        // will still attempt to create a model from cache when modelCache or
        // dataCache is provided in prepareModel(). Instead of failing straight
        // away, we try to utilize that other code path when fallbackModel is
        // set. Note that we cannot verify whether the returned model was
        // actually prepared from cache in that case.
        if (!prepareLaunchStatus.isOk() &&
            prepareLaunchStatus.getExceptionCode() == EX_SERVICE_SPECIFIC &&
            static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()) ==
                    ErrorStatus::GENERAL_FAILURE &&
            mIsCachingSupported && fallbackModel != nullptr) {
            preparedModelCallback = ndk::SharedRefBase::make<PreparedModelCallback>();
            prepareLaunchStatus = kDevice->prepareModel(
                    *fallbackModel, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
                    kNoDeadline, modelCache, dataCache, cacheToken, preparedModelCallback);
        }

        ASSERT_TRUE(prepareLaunchStatus.isOk() ||
                    prepareLaunchStatus.getExceptionCode() == EX_SERVICE_SPECIFIC)
                << "prepareLaunchStatus: " << prepareLaunchStatus.getDescription();
        if (!prepareLaunchStatus.isOk()) {
            *preparedModel = nullptr;
            *status = static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError());
            return;
        }

        // Retrieve prepared model.
        preparedModelCallback->wait();
        *status = preparedModelCallback->getStatus();
        *preparedModel = preparedModelCallback->getPreparedModel();
    }
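
    // Typical call pattern (illustrative sketch): attempt to load from cache
    // and, when the service reports GENERAL_FAILURE, let this helper fall back
    // to a full prepareModel with the original model:
    //   std::shared_ptr<IPreparedModel> prepared;
    //   ErrorStatus status;
    //   prepareModelFromCache(modelCache, dataCache, &prepared, &status,
    //                         /*fallbackModel=*/&model);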

    // Replicates the behaviour of the runtime when loading a model from cache.
    // Tests whether prepareModelFromCache behaves correctly when faced with bad
    // arguments. If prepareModelFromCache is not supported (GENERAL_FAILURE),
    // it attempts to call prepareModel with the same arguments, which is expected
    // either to not support the model (GENERAL_FAILURE) or to return a valid model.
    void verifyModelPreparationBehaviour(const std::vector<ndk::ScopedFileDescriptor>& modelCache,
                                         const std::vector<ndk::ScopedFileDescriptor>& dataCache,
                                         const Model* model, const TestModel& testModel) {
        std::shared_ptr<IPreparedModel> preparedModel;
        ErrorStatus status;

        // Verify that prepareModelFromCache fails either due to bad arguments
        // (INVALID_ARGUMENT) or with GENERAL_FAILURE if caching is not supported.
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,
                              /*fallbackModel=*/nullptr);
        if (status != ErrorStatus::INVALID_ARGUMENT) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
        }
        ASSERT_EQ(preparedModel, nullptr);

        // If caching is not supported, attempt calling prepareModel.
        if (status == ErrorStatus::GENERAL_FAILURE) {
            // The fallback with prepareModel should succeed regardless of the cache files,
            // unless the service does not support the model at all (GENERAL_FAILURE).
            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,
                                  /*fallbackModel=*/model);
            if (status != ErrorStatus::GENERAL_FAILURE) {
                // The fallback succeeded, so we should see a valid prepared model.
                ASSERT_EQ(status, ErrorStatus::NONE);
                ASSERT_NE(preparedModel, nullptr);
                EvaluatePreparedModel(kDevice, preparedModel, testModel,
                                      /*testKind=*/TestKind::GENERAL);
            }
        }
    }
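
    // Outcome summary (derived from the checks above): the first, fallback-free
    // attempt must fail with INVALID_ARGUMENT or GENERAL_FAILURE and yield no
    // model; on GENERAL_FAILURE, the retry with a fallback model must either
    // also report GENERAL_FAILURE (model unsupported) or produce a valid
    // prepared model that computes correct results.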

    // Absolute path to the temporary cache directory.
    std::string mCacheDir;

    // File paths for the model and data cache in the tmp cache directory, initialized with
    // size = mNum{Model|Data}Cache. Each entry names one cache file passed to the driver.
    std::vector<std::string> mModelCache;
    std::vector<std::string> mDataCache;

    // A separate temporary file path in the tmp cache directory.
    std::string mTmpCache;

    uint8_t mToken[static_cast<uint32_t>(IDevice::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
    uint32_t mNumModelCache;
    uint32_t mNumDataCache;
    bool mIsCachingSupported;

    const std::shared_ptr<IDevice> kDevice;
    // The primary data type of the testModel.
    const OperandType kOperandType;
};

using CompilationCachingTestParam = std::tuple<NamedDevice, OperandType>;

// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
// pass running with float32 models and the second pass running with quant8 models.
class CompilationCachingTest : public CompilationCachingTestBase,
                               public testing::WithParamInterface<CompilationCachingTestParam> {
  protected:
    CompilationCachingTest()
        : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
                                     std::get<OperandType>(GetParam())) {}
};

TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
    // Create the test model and compile it.
    const TestModel& testModel = createTestModel();
    const Model model = createModel(testModel);
    if (checkEarlyTermination(model)) return;
    std::shared_ptr<IPreparedModel> preparedModel = nullptr;

    // Save the compilation to cache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(model, modelCache, dataCache);
    }

    // Retrieve preparedModel from cache.
    {
        preparedModel = nullptr;
        ErrorStatus status;
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,
                              /*fallbackModel=*/&model);
        if (!mIsCachingSupported) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else if (checkEarlyTermination(status)) {
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else {
            ASSERT_EQ(status, ErrorStatus::NONE);
            ASSERT_NE(preparedModel, nullptr);
        }
    }

    // Execute and verify results.
    EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}

TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
    // Create the test model and compile it.
    const TestModel& testModel = createTestModel();
    const Model model = createModel(testModel);
    if (checkEarlyTermination(model)) return;
    std::shared_ptr<IPreparedModel> preparedModel = nullptr;

    // Save the compilation to cache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        uint8_t placeholderBytes[] = {0, 0};
        // Write placeholder bytes to each cache file.
        // The driver should be able to handle non-empty cache and a non-zero fd offset.
        for (uint32_t i = 0; i < modelCache.size(); i++) {
            ASSERT_EQ(write(modelCache[i].get(), &placeholderBytes, sizeof(placeholderBytes)),
                      sizeof(placeholderBytes));
        }
        for (uint32_t i = 0; i < dataCache.size(); i++) {
            ASSERT_EQ(write(dataCache[i].get(), &placeholderBytes, sizeof(placeholderBytes)),
                      sizeof(placeholderBytes));
        }
        saveModelToCache(model, modelCache, dataCache);
    }

    // Retrieve preparedModel from cache.
    {
        preparedModel = nullptr;
        ErrorStatus status;
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        uint8_t placeholderByte = 0;
        // Advance the offset of each handle by one byte.
        // The driver should be able to handle a non-zero fd offset.
        for (uint32_t i = 0; i < modelCache.size(); i++) {
            ASSERT_GE(read(modelCache[i].get(), &placeholderByte, 1), 0);
        }
        for (uint32_t i = 0; i < dataCache.size(); i++) {
            ASSERT_GE(read(dataCache[i].get(), &placeholderByte, 1), 0);
        }
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,
                              /*fallbackModel=*/&model);
        if (!mIsCachingSupported) {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else if (checkEarlyTermination(status)) {
            ASSERT_EQ(preparedModel, nullptr);
            return;
        } else {
            ASSERT_EQ(status, ErrorStatus::NONE);
            ASSERT_NE(preparedModel, nullptr);
        }
    }

    // Execute and verify results.
    EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}

TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
    // Create the test model and compile it.
    const TestModel& testModel = createTestModel();
    const Model model = createModel(testModel);
    if (checkEarlyTermination(model)) return;

    // Test with number of model cache files greater than mNumModelCache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        // Pass an additional cache file for model cache.
        mModelCache.push_back({mTmpCache});
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.pop_back();
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Test with number of model cache files smaller than mNumModelCache.
    if (mModelCache.size() > 0) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        // Pop out the last cache file.
        auto tmp = mModelCache.back();
        mModelCache.pop_back();
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.push_back(tmp);
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Test with number of data cache files greater than mNumDataCache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        // Pass an additional cache file for data cache.
        mDataCache.push_back({mTmpCache});
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.pop_back();
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Test with number of data cache files smaller than mNumDataCache.
    if (mDataCache.size() > 0) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        // Pop out the last cache file.
        auto tmp = mDataCache.back();
        mDataCache.pop_back();
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.push_back(tmp);
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }
}

TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
    // Create the test model and compile it.
    const TestModel& testModel = createTestModel();
    const Model model = createModel(testModel);
    if (checkEarlyTermination(model)) return;

    // Save the compilation to cache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(model, modelCache, dataCache);
    }

    // Test with number of model cache files greater than mNumModelCache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        mModelCache.push_back({mTmpCache});
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.pop_back();

        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Test with number of model cache files smaller than mNumModelCache.
    if (mModelCache.size() > 0) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        auto tmp = mModelCache.back();
        mModelCache.pop_back();
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.push_back(tmp);

        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Test with number of data cache files greater than mNumDataCache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        mDataCache.push_back({mTmpCache});
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.pop_back();

        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Test with number of data cache files smaller than mNumDataCache.
    if (mDataCache.size() > 0) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        auto tmp = mDataCache.back();
        mDataCache.pop_back();
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.push_back(tmp);

        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }
}

TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
    // Create the test model and compile it.
    const TestModel& testModel = createTestModel();
    const Model model = createModel(testModel);
    if (checkEarlyTermination(model)) return;
    std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
    std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);

    // Go through each handle in the model cache, testing with an invalid access mode.
    for (uint32_t i = 0; i < mNumModelCache; i++) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        modelCacheMode[i] = AccessMode::READ_ONLY;
        createCacheFds(mModelCache, modelCacheMode, &modelCache);
        createCacheFds(mDataCache, dataCacheMode, &dataCache);
        modelCacheMode[i] = AccessMode::READ_WRITE;
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Go through each handle in the data cache, testing with an invalid access mode.
    for (uint32_t i = 0; i < mNumDataCache; i++) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        dataCacheMode[i] = AccessMode::READ_ONLY;
        createCacheFds(mModelCache, modelCacheMode, &modelCache);
        createCacheFds(mDataCache, dataCacheMode, &dataCache);
        dataCacheMode[i] = AccessMode::READ_WRITE;
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(kDevice, preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }
}

TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
    // Create the test model and compile it.
    const TestModel& testModel = createTestModel();
    const Model model = createModel(testModel);
    if (checkEarlyTermination(model)) return;
    std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
    std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);

    // Save the compilation to cache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(model, modelCache, dataCache);
    }

    // Go through each handle in the model cache, testing with an invalid access mode.
    for (uint32_t i = 0; i < mNumModelCache; i++) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        modelCacheMode[i] = AccessMode::WRITE_ONLY;
        createCacheFds(mModelCache, modelCacheMode, &modelCache);
        createCacheFds(mDataCache, dataCacheMode, &dataCache);
        modelCacheMode[i] = AccessMode::READ_WRITE;

        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }

    // Go through each handle in the data cache, testing with an invalid access mode.
    for (uint32_t i = 0; i < mNumDataCache; i++) {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        dataCacheMode[i] = AccessMode::WRITE_ONLY;
        createCacheFds(mModelCache, modelCacheMode, &modelCache);
        createCacheFds(mDataCache, dataCacheMode, &dataCache);
        dataCacheMode[i] = AccessMode::READ_WRITE;
        verifyModelPreparationBehaviour(modelCache, dataCache, &model, testModel);
    }
}

// Copy file contents between the two lists of files.
// The vector sizes must match.
static void copyCacheFiles(const std::vector<std::string>& from,
                           const std::vector<std::string>& to) {
    constexpr size_t kBufferSize = 1000000;
    uint8_t buffer[kBufferSize];

    ASSERT_EQ(from.size(), to.size());
    for (uint32_t i = 0; i < from.size(); i++) {
        int fromFd = open(from[i].c_str(), O_RDONLY);
        int toFd = open(to[i].c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
        ASSERT_GE(fromFd, 0);
        ASSERT_GE(toFd, 0);

        ssize_t readBytes;
        while ((readBytes = read(fromFd, &buffer, kBufferSize)) > 0) {
            ASSERT_EQ(write(toFd, &buffer, readBytes), readBytes);
        }
        ASSERT_GE(readBytes, 0);

        close(fromFd);
        close(toFd);
    }
}

// Number of operations in the large test model.
constexpr uint32_t kLargeModelSize = 100;
constexpr uint32_t kNumIterationsTOCTOU = 100;

TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
    if (!mIsCachingSupported) return;

    // Create test models and check if they are fully supported by the service.
    const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
    const Model modelMul = createModel(testModelMul);
    if (checkEarlyTermination(modelMul)) return;
    const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
    const Model modelAdd = createModel(testModelAdd);
    if (checkEarlyTermination(modelAdd)) return;

    // Save the modelMul compilation to cache.
    auto modelCacheMul = mModelCache;
    for (auto& cache : modelCacheMul) {
        cache.append("_mul");
    }
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(modelMul, modelCache, dataCache);
    }

    // Use a different token for modelAdd.
    mToken[0]++;

    // This test is probabilistic, so we run it multiple times.
    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
        // Save the modelAdd compilation to cache.
        {
            std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
            createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);

            // Spawn a thread to copy the cache content concurrently while saving to cache.
            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
            saveModelToCache(modelAdd, modelCache, dataCache);
            thread.join();
        }

        // Retrieve preparedModel from cache.
        {
            std::shared_ptr<IPreparedModel> preparedModel = nullptr;
            ErrorStatus status;
            std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
            createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,
                                  /*fallbackModel=*/nullptr);

            // The preparation may fail or succeed, but must not crash. If the preparation
            // succeeds, the prepared model must execute with the correct result and not crash.
            if (status != ErrorStatus::NONE) {
                ASSERT_EQ(preparedModel, nullptr);
            } else {
                ASSERT_NE(preparedModel, nullptr);
                EvaluatePreparedModel(kDevice, preparedModel, testModelAdd,
                                      /*testKind=*/TestKind::GENERAL);
            }
        }
    }
}

TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
    if (!mIsCachingSupported) return;

    // Create test models and check if they are fully supported by the service.
    const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
    const Model modelMul = createModel(testModelMul);
    if (checkEarlyTermination(modelMul)) return;
    const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
    const Model modelAdd = createModel(testModelAdd);
    if (checkEarlyTermination(modelAdd)) return;

    // Save the modelMul compilation to cache.
    auto modelCacheMul = mModelCache;
    for (auto& cache : modelCacheMul) {
        cache.append("_mul");
    }
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(modelMul, modelCache, dataCache);
    }

    // Use a different token for modelAdd.
    mToken[0]++;

    // This test is probabilistic, so we run it multiple times.
    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
        // Save the modelAdd compilation to cache.
        {
            std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
            createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
            saveModelToCache(modelAdd, modelCache, dataCache);
        }

        // Retrieve preparedModel from cache.
        {
            std::shared_ptr<IPreparedModel> preparedModel = nullptr;
            ErrorStatus status;
            std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
            createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);

            // Spawn a thread to copy the cache content concurrently while preparing from cache.
            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,
                                  /*fallbackModel=*/nullptr);
            thread.join();

            // The preparation may fail or succeed, but must not crash. If the preparation
            // succeeds, the prepared model must execute with the correct result and not crash.
            if (status != ErrorStatus::NONE) {
                ASSERT_EQ(preparedModel, nullptr);
            } else {
                ASSERT_NE(preparedModel, nullptr);
                EvaluatePreparedModel(kDevice, preparedModel, testModelAdd,
                                      /*testKind=*/TestKind::GENERAL);
            }
        }
    }
}

TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
    if (!mIsCachingSupported) return;

    // Create test models and check if they are fully supported by the service.
    const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
    const Model modelMul = createModel(testModelMul);
    if (checkEarlyTermination(modelMul)) return;
    const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
    const Model modelAdd = createModel(testModelAdd);
    if (checkEarlyTermination(modelAdd)) return;

    // Save the modelMul compilation to cache.
    auto modelCacheMul = mModelCache;
    for (auto& cache : modelCacheMul) {
        cache.append("_mul");
    }
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(modelMul, modelCache, dataCache);
    }

    // Use a different token for modelAdd.
    mToken[0]++;

    // Save the modelAdd compilation to cache.
    {
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        saveModelToCache(modelAdd, modelCache, dataCache);
    }

    // Replace the model cache of modelAdd with that of modelMul.
    copyCacheFiles(modelCacheMul, mModelCache);

    // Retrieve the preparedModel from cache, expecting failure.
    {
        std::shared_ptr<IPreparedModel> preparedModel = nullptr;
        ErrorStatus status;
        std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
        createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
        ASSERT_EQ(preparedModel, nullptr);
    }
}

// TODO(b/179270601): restore kNamedDeviceChoices.
static const auto kOperandTypeChoices =
        testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);

std::string printCompilationCachingTest(
        const testing::TestParamInfo<CompilationCachingTestParam>& info) {
    const auto& [namedDevice, operandType] = info.param;
    const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
    return gtestCompliantName(getName(namedDevice) + "_" + type);
}

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(CompilationCachingTest);
INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, CompilationCachingTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kOperandTypeChoices),
                         printCompilationCachingTest);

using CompilationCachingSecurityTestParam = std::tuple<NamedDevice, OperandType, uint32_t>;

class CompilationCachingSecurityTest
    : public CompilationCachingTestBase,
      public testing::WithParamInterface<CompilationCachingSecurityTestParam> {
  protected:
    CompilationCachingSecurityTest()
        : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
                                     std::get<OperandType>(GetParam())) {}

    void SetUp() override {
        CompilationCachingTestBase::SetUp();
        generator.seed(kSeed);
    }

    // Get a random integer within the closed range [lower, upper].
    template <typename T>
    T getRandomInt(T lower, T upper) {
        std::uniform_int_distribution<T> dis(lower, upper);
        return dis(generator);
    }

    // Randomly flip one single bit of the cache entry.
    void flipOneBitOfCache(const std::string& filename, bool* skip) {
        FILE* pFile = fopen(filename.c_str(), "r+");
        ASSERT_NE(pFile, nullptr);
        ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
        long int fileSize = ftell(pFile);
        if (fileSize == 0) {
            fclose(pFile);
            *skip = true;
            return;
        }
        ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
        int readByte = fgetc(pFile);
        ASSERT_NE(readByte, EOF);
        ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
        ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
        fclose(pFile);
        *skip = false;
    }

    // Randomly append bytes to the cache entry.
    void appendBytesToCache(const std::string& filename, bool* skip) {
        FILE* pFile = fopen(filename.c_str(), "a");
        ASSERT_NE(pFile, nullptr);
        uint32_t appendLength = getRandomInt(1, 256);
        for (uint32_t i = 0; i < appendLength; i++) {
            ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
        }
        fclose(pFile);
        *skip = false;
    }

    enum class ExpectedResult { GENERAL_FAILURE, NOT_CRASH };

    // Tests whether the driver behaves as expected when given a corrupted cache or token.
    // The modifier will be invoked after saving to cache but before preparing from cache.
    // The modifier accepts one pointer argument "skip" as the return value, indicating
    // whether the test should be skipped or not.
    void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
        const TestModel& testModel = createTestModel();
        const Model model = createModel(testModel);
        if (checkEarlyTermination(model)) return;

        // Save the compilation to cache.
        {
            std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
            createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
            saveModelToCache(model, modelCache, dataCache);
        }

        bool skip = false;
        modifier(&skip);
        if (skip) return;

        // Retrieve preparedModel from cache.
        {
            std::shared_ptr<IPreparedModel> preparedModel = nullptr;
            ErrorStatus status;
            std::vector<ndk::ScopedFileDescriptor> modelCache, dataCache;
            createCacheFds(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheFds(mDataCache, AccessMode::READ_WRITE, &dataCache);
            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);

            switch (expected) {
                case ExpectedResult::GENERAL_FAILURE:
                    ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
                    ASSERT_EQ(preparedModel, nullptr);
                    break;
                case ExpectedResult::NOT_CRASH:
                    ASSERT_EQ(preparedModel == nullptr, status != ErrorStatus::NONE);
                    break;
                default:
                    FAIL();
            }
        }
    }

    const uint32_t kSeed = std::get<uint32_t>(GetParam());
    std::mt19937 generator;
};

TEST_P(CompilationCachingSecurityTest, CorruptedModelCache) {
    if (!mIsCachingSupported) return;
    for (uint32_t i = 0; i < mNumModelCache; i++) {
        testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
                           [this, i](bool* skip) { flipOneBitOfCache(mModelCache[i], skip); });
    }
}

TEST_P(CompilationCachingSecurityTest, WrongLengthModelCache) {
    if (!mIsCachingSupported) return;
    for (uint32_t i = 0; i < mNumModelCache; i++) {
        testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
                           [this, i](bool* skip) { appendBytesToCache(mModelCache[i], skip); });
    }
}

TEST_P(CompilationCachingSecurityTest, CorruptedDataCache) {
    if (!mIsCachingSupported) return;
    for (uint32_t i = 0; i < mNumDataCache; i++) {
        testCorruptedCache(ExpectedResult::NOT_CRASH,
                           [this, i](bool* skip) { flipOneBitOfCache(mDataCache[i], skip); });
    }
}

TEST_P(CompilationCachingSecurityTest, WrongLengthDataCache) {
    if (!mIsCachingSupported) return;
    for (uint32_t i = 0; i < mNumDataCache; i++) {
        testCorruptedCache(ExpectedResult::NOT_CRASH,
                           [this, i](bool* skip) { appendBytesToCache(mDataCache[i], skip); });
    }
}

TEST_P(CompilationCachingSecurityTest, WrongToken) {
    if (!mIsCachingSupported) return;
    testCorruptedCache(ExpectedResult::GENERAL_FAILURE, [this](bool* skip) {
        // Randomly flip one single bit in mToken.
        uint32_t ind =
                getRandomInt(0u, static_cast<uint32_t>(IDevice::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
        mToken[ind] ^= (1U << getRandomInt(0, 7));
        *skip = false;
    });
}

std::string printCompilationCachingSecurityTest(
        const testing::TestParamInfo<CompilationCachingSecurityTestParam>& info) {
    const auto& [namedDevice, operandType, seed] = info.param;
    const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
    return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + std::to_string(seed));
}

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(CompilationCachingSecurityTest);
INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, CompilationCachingSecurityTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()), kOperandTypeChoices,
                                          testing::Range(0U, 10U)),
                         printCompilationCachingSecurityTest);

}  // namespace aidl::android::hardware::neuralnetworks::vts::functional