/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestHarness.h"

#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <iterator>
#include <numeric>
#include <vector>

#include <android/binder_status.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/Types.h>
#include <nnapi/hal/aidl/Conversions.h>
#include <nnapi/hal/aidl/Utils.h>

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

#ifdef __ANDROID__
#include <android/sync.h>
#endif  // __ANDROID__

namespace aidl::android::hardware::neuralnetworks::vts::functional {

namespace nn = ::android::nn;
using namespace test_helper;
using implementation::PreparedModelCallback;

namespace {

enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };

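// Describes one execution configuration: the executor, whether to measure timing, the expected
// output mode, the memory type backing the request, and whether the execution object is reused.
// As an illustrative example (not a specific test case), the GENERAL test kind sweeps
// configurations equivalent to
//   TestConfig(Executor::SYNC, /*measureTiming=*/false, OutputType::FULLY_SPECIFIED,
//              MemoryType::ASHMEM, /*reusable=*/false);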
struct TestConfig {
    Executor executor;
    bool measureTiming;
    OutputType outputType;
    MemoryType memoryType;
    bool reusable;
    // `reportSkipping` indicates whether a test should print an info message
    // when it is skipped. It is true by default and is set to false in
    // quantization coupling tests to suppress skipping an individual test.
    bool reportSkipping;
    // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
    bool useConfig;
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(true),
          useConfig(false) {}
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable, bool reportSkipping)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(reportSkipping),
          useConfig(false) {}
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable, bool reportSkipping, bool useConfig)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(reportSkipping),
          useConfig(useConfig) {}
};

std::string toString(OutputType type) {
    switch (type) {
        case OutputType::FULLY_SPECIFIED:
            return "FULLY_SPECIFIED";
        case OutputType::UNSPECIFIED:
            return "UNSPECIFIED";
        case OutputType::INSUFFICIENT:
            return "INSUFFICIENT";
        case OutputType::MISSED_DEADLINE:
            return "MISSED_DEADLINE";
    }
}

std::string toString(const TestConfig& config) {
    std::stringstream ss;
    ss << "TestConfig{.executor=" << toString(config.executor)
       << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
       << ", .outputType=" << toString(config.outputType)
       << ", .memoryType=" << toString(config.memoryType)
       << ", .reusable=" << (config.reusable ? "true" : "false")
       << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
    return ss.str();
}

enum class IOType { INPUT, OUTPUT };

class DeviceMemoryAllocator {
  public:
    DeviceMemoryAllocator(const std::shared_ptr<IDevice>& device,
                          const std::shared_ptr<IPreparedModel>& preparedModel,
                          const TestModel& testModel)
        : kDevice(device), kPreparedModel(preparedModel), kTestModel(testModel) {}

    // Allocate device memory for a target input/output operand.
    // Return {IBuffer object, token} if successful.
    // Return {nullptr, 0} if device memory is not supported.
    template <IOType ioType>
    std::pair<std::shared_ptr<IBuffer>, int32_t> allocate(uint32_t index) {
        std::pair<std::shared_ptr<IBuffer>, int32_t> buffer;
        allocateInternal<ioType>(index, &buffer);
        return buffer;
    }

  private:
    template <IOType ioType>
    void allocateInternal(int32_t index, std::pair<std::shared_ptr<IBuffer>, int32_t>* result) {
        ASSERT_NE(result, nullptr);

        // Prepare arguments.
        BufferRole role = {.modelIndex = 0, .ioIndex = index, .probability = 1.0f};
        std::vector<BufferRole> inputRoles, outputRoles;
        if constexpr (ioType == IOType::INPUT) {
            inputRoles = {role};
        } else {
            outputRoles = {role};
        }

        // Allocate device memory.
        DeviceBuffer buffer;
        IPreparedModelParcel parcel;
        parcel.preparedModel = kPreparedModel;
        const auto ret = kDevice->allocate({}, {parcel}, inputRoles, outputRoles, &buffer);

        // Check allocation results.
        if (ret.isOk()) {
            ASSERT_NE(buffer.buffer, nullptr);
            ASSERT_GT(buffer.token, 0);
        } else {
            ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(static_cast<ErrorStatus>(ret.getServiceSpecificError()),
                      ErrorStatus::GENERAL_FAILURE);
            buffer.buffer = nullptr;
            buffer.token = 0;
        }

        // Initialize input data from TestBuffer.
        if constexpr (ioType == IOType::INPUT) {
            if (buffer.buffer != nullptr) {
                // TestBuffer -> Shared memory.
                const auto& testBuffer =
                        kTestModel.main.operands[kTestModel.main.inputIndexes[index]].data;
                ASSERT_GT(testBuffer.size(), 0);
                const auto sharedMemory = nn::createSharedMemory(testBuffer.size()).value();
                const auto memory = utils::convert(sharedMemory).value();
                const auto mapping = nn::map(sharedMemory).value();
                uint8_t* inputPtr = static_cast<uint8_t*>(std::get<void*>(mapping.pointer));
                ASSERT_NE(inputPtr, nullptr);
                const uint8_t* begin = testBuffer.get<uint8_t>();
                const uint8_t* end = begin + testBuffer.size();
                std::copy(begin, end, inputPtr);

                // Shared memory -> IBuffer.
                auto ret = buffer.buffer->copyFrom(memory, {});
                ASSERT_TRUE(ret.isOk());
            }
        }
        *result = {std::move(buffer.buffer), buffer.token};
    }

    const std::shared_ptr<IDevice> kDevice;
    const std::shared_ptr<IPreparedModel> kPreparedModel;
    const TestModel& kTestModel;
};

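// Converts a TestSubgraph into an AIDL Subgraph. Constant operand data is not copied here:
// CONSTANT_COPY and CONSTANT_REFERENCE operands are recorded in `constCopies` and
// `constReferences`, and the running totals `constCopySize` and `constRefSize` advance by each
// operand's aligned size, so the caller can pack the data afterwards at the recorded offsets.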
Subgraph createSubgraph(const TestSubgraph& testSubgraph, uint32_t* constCopySize,
                        std::vector<const TestBuffer*>* constCopies, uint32_t* constRefSize,
                        std::vector<const TestBuffer*>* constReferences) {
    CHECK(constCopySize != nullptr);
    CHECK(constCopies != nullptr);
    CHECK(constRefSize != nullptr);
    CHECK(constReferences != nullptr);

    // Operands.
    std::vector<Operand> operands(testSubgraph.operands.size());
    for (uint32_t i = 0; i < testSubgraph.operands.size(); i++) {
        const auto& op = testSubgraph.operands[i];

        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {
                    .poolIndex = 0,
                    .offset = *constCopySize,
                    .length = static_cast<int64_t>(op.data.size()),
            };
            constCopies->push_back(&op.data);
            *constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {
                    .poolIndex = 0,
                    .offset = *constRefSize,
                    .length = static_cast<int64_t>(op.data.size()),
            };
            constReferences->push_back(&op.data);
            *constRefSize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::SUBGRAPH) {
            loc = {
                    .poolIndex = 0,
                    .offset = *op.data.get<uint32_t>(),
                    .length = 0,
            };
        }

        std::optional<OperandExtraParams> extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            using Tag = OperandExtraParams::Tag;
            extraParams = OperandExtraParams::make<Tag::channelQuant>(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales,
                    .channelDim = static_cast<int32_t>(op.channelQuant.channelDim)});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = utils::toSigned(op.dimensions).value(),
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Operations.
    std::vector<Operation> operations(testSubgraph.operations.size());
    std::transform(testSubgraph.operations.begin(), testSubgraph.operations.end(),
                   operations.begin(), [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = utils::toSigned(op.inputs).value(),
                               .outputs = utils::toSigned(op.outputs).value()};
                   });

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = utils::toSigned(testSubgraph.inputIndexes).value(),
            .outputIndexes = utils::toSigned(testSubgraph.outputIndexes).value()};
}

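// Packs the given test buffers back-to-back into `output`, advancing by each buffer's aligned
// size so the data lands at the offsets recorded by createSubgraph().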
void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* output) {
    uint32_t offset = 0;
    for (const TestBuffer* buffer : buffers) {
        const uint8_t* begin = buffer->get<uint8_t>();
        const uint8_t* end = begin + buffer->size();
        std::copy(begin, end, output + offset);
        offset += buffer->alignedSize();
    }
}

}  // namespace

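// Blocks until the sync fence referred to by `syncFd` has signaled. Only supported when built
// for Android; the host build aborts because sync_wait() is unavailable there.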
void waitForSyncFence(int syncFd) {
    ASSERT_GT(syncFd, 0);
#ifdef __ANDROID__
    constexpr int kInfiniteTimeout = -1;
    int r = sync_wait(syncFd, kInfiniteTimeout);
    ASSERT_GE(r, 0);
#else   // __ANDROID__
    LOG(FATAL) << "waitForSyncFence not supported on host";
#endif  // __ANDROID__
}

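// Builds an AIDL Model from a TestModel: converts the main and referenced subgraphs, packs
// CONSTANT_COPY operand data into `operandValues`, and places CONSTANT_REFERENCE operand data
// into a single shared memory pool.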
Model createModel(const TestModel& testModel) {
    uint32_t constCopySize = 0;
    uint32_t constRefSize = 0;
    std::vector<const TestBuffer*> constCopies;
    std::vector<const TestBuffer*> constReferences;

    Subgraph mainSubgraph = createSubgraph(testModel.main, &constCopySize, &constCopies,
                                           &constRefSize, &constReferences);
    std::vector<Subgraph> refSubgraphs(testModel.referenced.size());
    std::transform(testModel.referenced.begin(), testModel.referenced.end(), refSubgraphs.begin(),
                   [&constCopySize, &constCopies, &constRefSize,
                    &constReferences](const TestSubgraph& testSubgraph) {
                       return createSubgraph(testSubgraph, &constCopySize, &constCopies,
                                             &constRefSize, &constReferences);
                   });

    // Constant copies.
    std::vector<uint8_t> operandValues(constCopySize);
    copyTestBuffers(constCopies, operandValues.data());

    // Shared memory.
    std::vector<nn::SharedMemory> pools = {};
    if (constRefSize > 0) {
        const auto pool = nn::createSharedMemory(constRefSize).value();
        pools.push_back(pool);

        // Load data.
        const auto mappedMemory = nn::map(pool).value();
        uint8_t* mappedPtr = static_cast<uint8_t*>(std::get<void*>(mappedMemory.pointer));
        CHECK(mappedPtr != nullptr);

        copyTestBuffers(constReferences, mappedPtr);
    }

    std::vector<Memory> aidlPools;
    aidlPools.reserve(pools.size());
    for (auto& pool : pools) {
        auto aidlPool = utils::convert(pool).value();
        aidlPools.push_back(std::move(aidlPool));
    }

    return {.main = std::move(mainSubgraph),
            .referenced = std::move(refSubgraphs),
            .operandValues = std::move(operandValues),
            .pools = std::move(aidlPools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}

static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
    const auto byteSize = testModel.main.operands[testModel.main.outputIndexes[index]].data.size();
    return byteSize > 1u;
}

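// Shrinks the requested output buffer at `outputIndex` by one byte and grows its padding by one
// byte, so the driver is expected to report OUTPUT_INSUFFICIENT_SIZE and must not use the
// padding for output data.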
static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
    auto& loc = request->outputs[outputIndex].location;
    ASSERT_GT(loc.length, 1u);
    loc.length -= 1u;
    // Test that the padding is not used for output data.
    loc.padding += 1u;
}

static void makeOutputDimensionsUnspecified(Model* model) {
    for (auto i : model->main.outputIndexes) {
        auto& dims = model->main.operands[i].dimensions;
        std::fill(dims.begin(), dims.end(), 0);
    }
}

// Manages the lifetime of memory resources used in an execution.
class ExecutionContext {
  public:
    ExecutionContext(std::shared_ptr<IDevice> device, std::shared_ptr<IPreparedModel> preparedModel)
        : kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}

    std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
    std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
                                             const Request& request) const;

  private:
    // Get a TestBuffer with data copied from an IBuffer object.
    void getBuffer(const std::shared_ptr<IBuffer>& buffer, size_t size,
                   TestBuffer* testBuffer) const;

    static constexpr uint32_t kInputPoolIndex = 0;
    static constexpr uint32_t kOutputPoolIndex = 1;
    static constexpr uint32_t kDeviceMemoryBeginIndex = 2;

    const std::shared_ptr<IDevice> kDevice;
    const std::shared_ptr<IPreparedModel> kPreparedModel;
    std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
    std::vector<std::shared_ptr<IBuffer>> mBuffers;
};

// Returns the number of bytes needed to round up "size" to the nearest multiple of "multiple".
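// For example, roundUpBytesNeeded(10, 8) == 6, because 6 more bytes are needed to reach the
// next multiple of 8 (16).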
static uint32_t roundUpBytesNeeded(uint32_t size, uint32_t multiple) {
    CHECK(multiple != 0);
    return ((size + multiple - 1) / multiple) * multiple - size;
}

std::optional<Request> ExecutionContext::createRequest(const TestModel& testModel,
                                                       MemoryType memoryType) {
    // Memory pools are organized as:
    // - 0: Input shared memory pool
    // - 1: Output shared memory pool
    // - [2, 2+i): Input device memories
    // - [2+i, 2+i+o): Output device memories
    DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
    std::vector<int32_t> tokens;
    mBuffers.clear();

    // Model inputs.
    std::vector<RequestArgument> inputs(testModel.main.inputIndexes.size());
    size_t inputSize = 0;
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
        if (op.data.size() == 0) {
            // Omitted input.
            inputs[i] = {.hasNoValue = true};
            continue;
        } else if (memoryType == MemoryType::DEVICE) {
            SCOPED_TRACE("Input index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<int32_t>(mBuffers.size() +
                                                                      kDeviceMemoryBeginIndex)};
                mBuffers.push_back(std::move(buffer));
                tokens.push_back(token);
                inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // Reserve shared memory for input.
        inputSize += roundUpBytesNeeded(inputSize, nn::kDefaultRequestMemoryAlignment);
        const auto padding = roundUpBytesNeeded(op.data.size(), nn::kDefaultRequestMemoryPadding);
        DataLocation loc = {.poolIndex = kInputPoolIndex,
                            .offset = static_cast<int64_t>(inputSize),
                            .length = static_cast<int64_t>(op.data.size()),
                            .padding = static_cast<int64_t>(padding)};
        inputSize += (op.data.size() + padding);
        inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Model outputs.
    std::vector<RequestArgument> outputs(testModel.main.outputIndexes.size());
    size_t outputSize = 0;
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
        if (memoryType == MemoryType::DEVICE) {
            SCOPED_TRACE("Output index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<int32_t>(mBuffers.size() +
                                                                      kDeviceMemoryBeginIndex)};
                mBuffers.push_back(std::move(buffer));
                tokens.push_back(token);
                outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the driver, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        size_t bufferSize = std::max<size_t>(op.data.size(), 1);

        // Reserve shared memory for output.
        outputSize += roundUpBytesNeeded(outputSize, nn::kDefaultRequestMemoryAlignment);
        const auto padding = roundUpBytesNeeded(bufferSize, nn::kDefaultRequestMemoryPadding);
        DataLocation loc = {.poolIndex = kOutputPoolIndex,
                            .offset = static_cast<int64_t>(outputSize),
                            .length = static_cast<int64_t>(bufferSize),
                            .padding = static_cast<int64_t>(padding)};
        outputSize += (bufferSize + padding);
        outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
        return std::nullopt;
    }

    // Memory pools.
    if (memoryType == MemoryType::BLOB_AHWB) {
        mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
        mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
    } else {
        mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1), /*aidlReadonly=*/true);
        mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1), /*aidlReadonly=*/false);
    }
    CHECK_NE(mInputMemory, nullptr);
    CHECK_NE(mOutputMemory, nullptr);
    std::vector<RequestMemoryPool> pools;
    pools.reserve(kDeviceMemoryBeginIndex + mBuffers.size());

    auto copiedInputMemory = utils::clone(*mInputMemory->getAidlMemory());
    CHECK(copiedInputMemory.has_value()) << copiedInputMemory.error().message;
    auto copiedOutputMemory = utils::clone(*mOutputMemory->getAidlMemory());
    CHECK(copiedOutputMemory.has_value()) << copiedOutputMemory.error().message;

    pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
            std::move(copiedInputMemory).value()));
    pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
            std::move(copiedOutputMemory).value()));
    for (const auto& token : tokens) {
        pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::token>(token));
    }

    // Copy input data to the input shared memory pool.
    uint8_t* inputPtr = mInputMemory->getPointer();
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
            const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, inputPtr + inputs[i].location.offset);
        }
    }
    return Request{
            .inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}

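// Copies the execution results out of the request's memory: outputs placed in the shared output
// pool are read directly from the mapped memory, while outputs backed by device memory are read
// back through IBuffer::copyTo() via getBuffer().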
std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const TestModel& testModel,
                                                           const Request& request) const {
    // Copy out output results.
    uint8_t* outputPtr = mOutputMemory->getPointer();
    std::vector<TestBuffer> outputBuffers;
    for (uint32_t i = 0; i < request.outputs.size(); i++) {
        const auto& outputLoc = request.outputs[i].location;
        if (outputLoc.poolIndex == kOutputPoolIndex) {
            outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
        } else {
            const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
            if (op.data.size() == 0) {
                outputBuffers.emplace_back(0, nullptr);
            } else {
                SCOPED_TRACE("Output index = " + std::to_string(i));
                const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
                TestBuffer buffer;
                getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
                outputBuffers.push_back(std::move(buffer));
            }
        }
    }
    return outputBuffers;
}

// Get a TestBuffer with data copied from an IBuffer object.
void ExecutionContext::getBuffer(const std::shared_ptr<IBuffer>& buffer, size_t size,
                                 TestBuffer* testBuffer) const {
    // IBuffer -> Shared memory.
    auto sharedMemory = nn::createSharedMemory(size).value();
    auto aidlMemory = utils::convert(sharedMemory).value();
    const auto ret = buffer->copyTo(aidlMemory);
    ASSERT_TRUE(ret.isOk());

    // Shared memory -> TestBuffer.
    const auto outputMemory = nn::map(sharedMemory).value();
    const uint8_t* outputPtr = std::visit(
            [](auto* ptr) { return static_cast<const uint8_t*>(ptr); }, outputMemory.pointer);
    ASSERT_NE(outputPtr, nullptr);
    ASSERT_NE(testBuffer, nullptr);
    *testBuffer = TestBuffer(size, outputPtr);
}

static bool hasZeroSizedOutput(const TestModel& testModel) {
    return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() == 0;
                       });
}

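// Runs one execution of `testModel` on `preparedModel` using the executor, memory type, and
// output mode described by `testConfig`, then checks the returned status, output shapes, and
// output data. If `skipped` is provided, it is set to true when the vendor service reports
// GENERAL_FAILURE for a model it does not support.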
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                           const std::shared_ptr<IPreparedModel>& preparedModel,
                           const TestModel& testModel, const TestConfig& testConfig,
                           bool* skipped = nullptr) {
    if (skipped != nullptr) {
        *skipped = false;
    }
    // If output0 is not larger than one byte, we cannot test with an insufficient buffer.
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    ExecutionContext context(device, preparedModel);
    auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
    // Skip if testing memory domain but no device memory has been allocated.
    if (!maybeRequest.has_value()) {
        return;
    }

    Request request = std::move(maybeRequest).value();

    constexpr uint32_t kInsufficientOutputIndex = 0;
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(kInsufficientOutputIndex, &request);
    }

    int64_t loopTimeoutDurationNs = kOmittedTimeoutDuration;
    // OutputType::MISSED_DEADLINE is only used by
    // TestKind::INTINITE_LOOP_TIMEOUT tests to verify that an infinite loop is
    // aborted after a timeout.
    if (testConfig.outputType == OutputType::MISSED_DEADLINE) {
        // Override the default loop timeout duration with a small value to
        // speed up test execution.
        constexpr int64_t kMillisecond = 1'000'000;
        loopTimeoutDurationNs = 1 * kMillisecond;
    }

    std::shared_ptr<IExecution> execution;
    if (testConfig.reusable) {
        const auto ret = preparedModel->createReusableExecution(
                request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
        ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
        ASSERT_NE(nullptr, execution.get());
    }

    const auto executeAndCheckResults = [&preparedModel, &execution, &testConfig, &testModel,
                                         &context, &request, loopTimeoutDurationNs, skipped]() {
        ErrorStatus executionStatus;
        std::vector<OutputShape> outputShapes;
        Timing timing = kNoTiming;
        switch (testConfig.executor) {
            case Executor::SYNC: {
                SCOPED_TRACE("synchronous");

                ExecutionResult executionResult;
                // execute
                ::ndk::ScopedAStatus ret;
                if (testConfig.reusable) {
                    ret = execution->executeSynchronously(kNoDeadline, &executionResult);
                } else if (testConfig.useConfig) {
                    ret = preparedModel->executeSynchronouslyWithConfig(
                            request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
                            kNoDeadline, &executionResult);
                } else {
                    ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                              kNoDeadline, loopTimeoutDurationNs,
                                                              &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (ret.isOk()) {
                    executionStatus = executionResult.outputSufficientSize
                                              ? ErrorStatus::NONE
                                              : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                    outputShapes = std::move(executionResult.outputShapes);
                    timing = executionResult.timing;
                } else {
                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                }
                break;
            }
            case Executor::BURST: {
                SCOPED_TRACE("burst");

                // create burst
                std::shared_ptr<IBurst> burst;
                auto ret = preparedModel->configureExecutionBurst(&burst);
                ASSERT_TRUE(ret.isOk()) << ret.getDescription();
                ASSERT_NE(nullptr, burst.get());

                // associate a unique slot with each memory pool
                int64_t currentSlot = 0;
                std::vector<int64_t> slots;
                slots.reserve(request.pools.size());
                for (const auto& pool : request.pools) {
                    if (pool.getTag() == RequestMemoryPool::Tag::pool) {
                        slots.push_back(currentSlot++);
                    } else {
                        EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
                        slots.push_back(-1);
                    }
                }

                ExecutionResult executionResult;
                // execute
                if (testConfig.useConfig) {
                    ret = burst->executeSynchronouslyWithConfig(
                            request, slots,
                            {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
                            &executionResult);
                } else {
                    ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
                                                      kNoDeadline, loopTimeoutDurationNs,
                                                      &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (ret.isOk()) {
                    executionStatus = executionResult.outputSufficientSize
                                              ? ErrorStatus::NONE
                                              : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                    outputShapes = std::move(executionResult.outputShapes);
                    timing = executionResult.timing;
                } else {
                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                }

                // Mark each slot as unused after the execution. This is unnecessary because the
                // burst is freed after this scope ends, but this is here to test the functionality.
                for (int64_t slot : slots) {
                    ret = burst->releaseMemoryResource(slot);
                    ASSERT_TRUE(ret.isOk()) << ret.getDescription();
                }

                break;
            }
            case Executor::FENCED: {
                SCOPED_TRACE("fenced");
                ErrorStatus result = ErrorStatus::NONE;
                FencedExecutionResult executionResult;
                ::ndk::ScopedAStatus ret;
                if (testConfig.reusable) {
                    ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
                } else if (testConfig.useConfig) {
                    ret = preparedModel->executeFencedWithConfig(
                            request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
                            kNoDeadline, kNoDuration, &executionResult);
                } else {
                    ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                       kNoDeadline, loopTimeoutDurationNs,
                                                       kNoDuration, &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (!ret.isOk()) {
                    result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                    executionStatus = result;
                } else if (executionResult.syncFence.get() != -1) {
                    std::vector<ndk::ScopedFileDescriptor> waitFor;
                    auto dupFd = dup(executionResult.syncFence.get());
                    ASSERT_NE(dupFd, -1);
                    waitFor.emplace_back(dupFd);
                    // If a sync fence is returned, try to start another run waiting for the
                    // sync fence.
                    if (testConfig.reusable) {
                        ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
                                                       &executionResult);
                    } else if (testConfig.useConfig) {
                        ret = preparedModel->executeFencedWithConfig(
                                request, waitFor,
                                {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
                                kNoDeadline, kNoDuration, &executionResult);
                    } else {
                        ret = preparedModel->executeFenced(
                                request, waitFor, testConfig.measureTiming, kNoDeadline,
                                loopTimeoutDurationNs, kNoDuration, &executionResult);
                    }
                    ASSERT_TRUE(ret.isOk());
                    waitForSyncFence(executionResult.syncFence.get());
                }
                if (result == ErrorStatus::NONE) {
                    ASSERT_NE(executionResult.callback, nullptr);
                    Timing timingFenced;
                    auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
                                                                          &executionStatus);
                    ASSERT_TRUE(ret.isOk());
                }
                break;
            }
            default: {
                FAIL() << "Unsupported execution mode for AIDL interface.";
            }
        }

        if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
            executionStatus == ErrorStatus::GENERAL_FAILURE) {
            if (skipped != nullptr) {
                *skipped = true;
            }
            if (!testConfig.reportSkipping) {
                return;
            }
            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                         "execute model that it does not support.";
            std::cout << "[ ] Early termination of test because vendor service cannot "
                         "execute model that it does not support."
                      << std::endl;
            GTEST_SKIP();
        }
        if (!testConfig.measureTiming) {
            EXPECT_EQ(timing, kNoTiming);
        } else {
            if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
                EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
            }
        }

        switch (testConfig.outputType) {
            case OutputType::FULLY_SPECIFIED:
                if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
                    // Executor::FENCED does not support zero-sized output.
                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
                    return;
                }
                // If the model output operands are fully specified, outputShapes must be
                // either empty, or have the same number of elements as the number of outputs.
                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
                ASSERT_TRUE(outputShapes.size() == 0 ||
                            outputShapes.size() == testModel.main.outputIndexes.size());
                break;
            case OutputType::UNSPECIFIED:
                if (testConfig.executor == Executor::FENCED) {
                    // For Executor::FENCED, the output shape must be fully specified.
                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
                    return;
                }
                // If the model output operands are not fully specified, outputShapes must have
                // the same number of elements as the number of outputs.
                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
                ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
                break;
            case OutputType::INSUFFICIENT:
                if (testConfig.executor == Executor::FENCED) {
                    // For Executor::FENCED, the output shape must be fully specified.
                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
                    return;
                }
                ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
                ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
                // Check that all returned output dimensions are at least as fully specified as the
                // union of the information about the corresponding operand in the model and in the
                // request. In this test, all model outputs have known rank with all dimensions
                // unspecified, and no dimensional information is provided in the request.
                for (uint32_t i = 0; i < outputShapes.size(); i++) {
                    ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
                    const auto& actual = outputShapes[i].dimensions;
                    const auto& golden =
                            testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
                    ASSERT_EQ(actual.size(), golden.size());
                    for (uint32_t j = 0; j < actual.size(); j++) {
                        if (actual[j] == 0) continue;
                        EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
                    }
                }
                return;
            case OutputType::MISSED_DEADLINE:
                ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                            executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
                        << "executionStatus = " << executionStatus;
                return;
        }

        // Go through all outputs, check returned output shapes.
        for (uint32_t i = 0; i < outputShapes.size(); i++) {
            EXPECT_TRUE(outputShapes[i].isSufficient);
            const auto& expect =
                    testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
            const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
            ASSERT_TRUE(unsignedActual.has_value());
            const std::vector<uint32_t>& actual = unsignedActual.value();
            EXPECT_EQ(expect, actual);
        }

        // Retrieve execution results.
        const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);

        // We want "close-enough" results.
        checkResults(testModel, outputs);
    };

    executeAndCheckResults();

    // For reusable execution tests, run the execution twice.
    if (testConfig.reusable) {
        SCOPED_TRACE("Second execution");
        executeAndCheckResults();
    }
}

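// Sweeps the execution configurations appropriate for `testKind` (output type, timing
// measurement, executor, memory type, reusability, and use of the execute*WithConfig methods)
// and runs EvaluatePreparedModel once per combination.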
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                           const std::shared_ptr<IPreparedModel>& preparedModel,
                           const TestModel& testModel, TestKind testKind) {
    std::vector<OutputType> outputTypesList;
    std::vector<bool> measureTimingList;
    std::vector<Executor> executorList;
    std::vector<MemoryType> memoryTypeList;
    std::vector<bool> reusableList = {false};
    std::vector<bool> useConfigList = {false};

    int deviceVersion;
    ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
    if (deviceVersion >= kMinAidlLevelForFL8) {
        reusableList.push_back(true);
        useConfigList.push_back(true);
    }

    switch (testKind) {
        case TestKind::GENERAL: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::BURST};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::DYNAMIC_SHAPE: {
            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::MEMORY_DOMAIN: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
#ifdef __ANDROID__
            memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
#else   // __ANDROID__
            memoryTypeList = {MemoryType::DEVICE};  // BLOB_AHWB is not supported on the host.
#endif  // __ANDROID__
        } break;
        case TestKind::FENCED_COMPUTE: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false, true};
            executorList = {Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
            return;
        } break;
        case TestKind::INTINITE_LOOP_TIMEOUT: {
            outputTypesList = {OutputType::MISSED_DEADLINE};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
    }

    for (const OutputType outputType : outputTypesList) {
        for (const bool measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                for (const MemoryType memoryType : memoryTypeList) {
                    for (const bool reusable : reusableList) {
                        for (const bool useConfig : useConfigList) {
                            if ((useConfig || executor == Executor::BURST) && reusable) continue;
                            const TestConfig testConfig(executor, measureTiming, outputType,
                                                        memoryType, reusable,
                                                        /*reportSkipping=*/true, useConfig);
                            SCOPED_TRACE(toString(testConfig));
                            EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
                        }
                    }
                }
            }
        }
    }
}

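// Runs the same set of configurations on a base model and on its signed-quantized counterpart,
// and requires the two to be either both executed or both skipped.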
void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
                                   const std::shared_ptr<IPreparedModel>& preparedModel,
                                   const TestModel& testModel,
                                   const std::shared_ptr<IPreparedModel>& preparedCoupledModel,
                                   const TestModel& coupledModel) {
    const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
    const std::vector<bool> measureTimingList = {false, true};
    const std::vector<Executor> executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};

    for (const OutputType outputType : outputTypesList) {
        for (const bool measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
                                            /*reusable=*/false, /*reportSkipping=*/false);
                bool baseSkipped = false;
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
                bool coupledSkipped = false;
                EvaluatePreparedModel(device, preparedCoupledModel, coupledModel, testConfig,
                                      &coupledSkipped);
                ASSERT_EQ(baseSkipped, coupledSkipped);
                if (baseSkipped) {
                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                                 "execute model that it does not support.";
                    std::cout << "[ ] Early termination of test because vendor service cannot "
                                 "execute model that it does not support."
                              << std::endl;
                    GTEST_SKIP();
                }
            }
        }
    }
}

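// Test entry point: builds the AIDL Model from `testModel`, prepares it on `device`, and runs
// the evaluation appropriate for `testKind`. For QUANTIZATION_COUPLING, the model and its
// signed-quantized variant are prepared and evaluated together.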
void Execute(const std::shared_ptr<IDevice>& device, const TestModel& testModel,
             TestKind testKind) {
    Model model = createModel(testModel);
    if (testKind == TestKind::DYNAMIC_SHAPE) {
        makeOutputDimensionsUnspecified(&model);
    }

    std::shared_ptr<IPreparedModel> preparedModel;
    switch (testKind) {
        case TestKind::GENERAL:
        case TestKind::DYNAMIC_SHAPE:
        case TestKind::MEMORY_DOMAIN:
        case TestKind::FENCED_COMPUTE:
        case TestKind::INTINITE_LOOP_TIMEOUT: {
            createPreparedModel(device, model, &preparedModel);
            if (preparedModel == nullptr) return;
            EvaluatePreparedModel(device, preparedModel, testModel, testKind);
            int32_t deviceVersion;
            ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
            if (deviceVersion >= kMinAidlLevelForFL8) {
                createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ true,
                                    /*useConfig*/ true);
                EvaluatePreparedModel(device, preparedModel, testModel, testKind);
            }
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
            createPreparedModel(device, model, &preparedModel,
                                /*reportSkipping*/ false);
            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
            std::shared_ptr<IPreparedModel> preparedCoupledModel;
            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
                                /*reportSkipping*/ false);
            // If we couldn't prepare a model with unsigned quantization, we must
            // fail to prepare a model with signed quantization as well.
            if (preparedModel == nullptr) {
                ASSERT_EQ(preparedCoupledModel, nullptr);
                // If we failed to prepare both of the models, we can safely skip
                // the test.
                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                             "prepare model that it does not support.";
                std::cout << "[ ] Early termination of test because vendor service cannot "
                             "prepare model that it does not support."
                          << std::endl;
                GTEST_SKIP();
            }
            ASSERT_NE(preparedCoupledModel, nullptr);
            EvaluatePreparedCoupledModels(device, preparedModel, testModel, preparedCoupledModel,
                                          signedQuantizedModel);
        } break;
    }
}

void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
    const bool deviceIsResponsive =
            ndk::ScopedAStatus::fromStatus(AIBinder_ping(kDevice->asBinder().get())).isOk();
    ASSERT_TRUE(deviceIsResponsive);
    // TODO(b/201260787): We should require old drivers to report the model as
    // unsupported instead of simply skipping the test.
    SkipIfDriverOlderThanTestModel();
}

void GeneratedTestBase::SkipIfDriverOlderThanTestModel() {
    int32_t deviceVersion;
    ASSERT_TRUE(kDevice->getInterfaceVersion(&deviceVersion).isOk());
    const int32_t modelVersion = kTestModel.getAidlVersionInt();
    if (deviceVersion < modelVersion) {
        GTEST_SKIP() << "Device interface version " << deviceVersion
                     << " is older than test model's minimum supported HAL version " << modelVersion
                     << ". Skipping test.";
    }
}

std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
    const auto& [namedDevice, namedModel] = info.param;
    return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
}

// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};

// Tag for the memory domain tests
class MemoryDomainTest : public GeneratedTest {};

// Tag for the fenced compute tests
class FencedComputeTest : public GeneratedTest {};

// Tag for the quantization coupling tests
class QuantizationCouplingTest : public GeneratedTest {};

// Tag for the loop timeout tests
class InfiniteLoopTimeoutTest : public GeneratedTest {};

TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, TestKind::GENERAL);
}

TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, TestKind::DYNAMIC_SHAPE);
}

TEST_P(MemoryDomainTest, Test) {
    Execute(kDevice, kTestModel, TestKind::MEMORY_DOMAIN);
}

TEST_P(FencedComputeTest, Test) {
    Execute(kDevice, kTestModel, TestKind::FENCED_COMPUTE);
}

TEST_P(QuantizationCouplingTest, Test) {
    Execute(kDevice, kTestModel, TestKind::QUANTIZATION_COUPLING);
}

TEST_P(InfiniteLoopTimeoutTest, Test) {
    Execute(kDevice, kTestModel, TestKind::INTINITE_LOOP_TIMEOUT);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(FencedComputeTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.hasQuant8CoupledOperands() &&
           testModel.main.operations.size() == 1;
});

INSTANTIATE_GENERATED_TEST(InfiniteLoopTimeoutTest, [](const TestModel& testModel) {
    return testModel.isInfiniteLoopTimeoutTest();
});

}  // namespace aidl::android::hardware::neuralnetworks::vts::functional