/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestHarness.h"

#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/sync.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <iterator>
#include <numeric>
#include <vector>

#include <MemoryUtils.h>
#include <android/binder_status.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/Types.h>
#include <nnapi/hal/aidl/Conversions.h>
#include <nnapi/hal/aidl/Utils.h>

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

namespace aidl::android::hardware::neuralnetworks::vts::functional {

namespace nn = ::android::nn;
using namespace test_helper;
using implementation::PreparedModelCallback;

namespace {

enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };

struct TestConfig {
    Executor executor;
    bool measureTiming;
    OutputType outputType;
    MemoryType memoryType;
    bool reusable;
    // `reportSkipping` indicates whether a test should print an info message when
    // it is skipped. The field defaults to true and is set to false in the
    // quantization coupling tests to suppress the skip message.
    bool reportSkipping;
    // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
    bool useConfig;
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(true),
          useConfig(false) {}
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable, bool reportSkipping)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(reportSkipping),
          useConfig(false) {}
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable, bool reportSkipping, bool useConfig)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(reportSkipping),
          useConfig(useConfig) {}
};

std::string toString(OutputType type) {
    switch (type) {
        case OutputType::FULLY_SPECIFIED:
            return "FULLY_SPECIFIED";
        case OutputType::UNSPECIFIED:
            return "UNSPECIFIED";
        case OutputType::INSUFFICIENT:
            return "INSUFFICIENT";
        case OutputType::MISSED_DEADLINE:
            return "MISSED_DEADLINE";
    }
}

std::string toString(const TestConfig& config) {
    std::stringstream ss;
    ss << "TestConfig{.executor=" << toString(config.executor)
       << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
       << ", .outputType=" << toString(config.outputType)
       << ", .memoryType=" << toString(config.memoryType)
       << ", .reusable=" << (config.reusable ? "true" : "false")
       << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
    return ss.str();
}

enum class IOType { INPUT, OUTPUT };

class DeviceMemoryAllocator {
  public:
    DeviceMemoryAllocator(const std::shared_ptr<IDevice>& device,
                          const std::shared_ptr<IPreparedModel>& preparedModel,
                          const TestModel& testModel)
        : kDevice(device), kPreparedModel(preparedModel), kTestModel(testModel) {}

    // Allocate device memory for a target input/output operand.
    // Return {IBuffer object, token} if successful.
    // Return {nullptr, 0} if device memory is not supported.
    template <IOType ioType>
    std::pair<std::shared_ptr<IBuffer>, int32_t> allocate(uint32_t index) {
        std::pair<std::shared_ptr<IBuffer>, int32_t> buffer;
        allocateInternal<ioType>(index, &buffer);
        return buffer;
    }

  private:
    template <IOType ioType>
    void allocateInternal(int32_t index, std::pair<std::shared_ptr<IBuffer>, int32_t>* result) {
        ASSERT_NE(result, nullptr);

        // Prepare arguments.
        BufferRole role = {.modelIndex = 0, .ioIndex = index, .probability = 1.0f};
        std::vector<BufferRole> inputRoles, outputRoles;
        if constexpr (ioType == IOType::INPUT) {
            inputRoles = {role};
        } else {
            outputRoles = {role};
        }

        // Allocate device memory.
        DeviceBuffer buffer;
        IPreparedModelParcel parcel;
        parcel.preparedModel = kPreparedModel;
        const auto ret = kDevice->allocate({}, {parcel}, inputRoles, outputRoles, &buffer);

        // Check allocation results.
        if (ret.isOk()) {
            ASSERT_NE(buffer.buffer, nullptr);
            ASSERT_GT(buffer.token, 0);
        } else {
            ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(static_cast<ErrorStatus>(ret.getServiceSpecificError()),
                      ErrorStatus::GENERAL_FAILURE);
            buffer.buffer = nullptr;
            buffer.token = 0;
        }

        // Initialize input data from TestBuffer.
        if constexpr (ioType == IOType::INPUT) {
            if (buffer.buffer != nullptr) {
                // TestBuffer -> Shared memory.
                const auto& testBuffer =
                        kTestModel.main.operands[kTestModel.main.inputIndexes[index]].data;
                ASSERT_GT(testBuffer.size(), 0);
                const auto sharedMemory = nn::createSharedMemory(testBuffer.size()).value();
                const auto memory = utils::convert(sharedMemory).value();
                const auto mapping = nn::map(sharedMemory).value();
                uint8_t* inputPtr = static_cast<uint8_t*>(std::get<void*>(mapping.pointer));
                ASSERT_NE(inputPtr, nullptr);
                const uint8_t* begin = testBuffer.get<uint8_t>();
                const uint8_t* end = begin + testBuffer.size();
                std::copy(begin, end, inputPtr);

                // Shared memory -> IBuffer.
                auto ret = buffer.buffer->copyFrom(memory, {});
                ASSERT_TRUE(ret.isOk());
            }
        }
        *result = {std::move(buffer.buffer), buffer.token};
    }

    const std::shared_ptr<IDevice> kDevice;
    const std::shared_ptr<IPreparedModel> kPreparedModel;
    const TestModel& kTestModel;
};

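// Converts a TestSubgraph into an AIDL Subgraph. Constant data is not copied here: CONSTANT_COPY
// operands are recorded in `constCopies` (with their running total size in `constCopySize`) and
// CONSTANT_REFERENCE operands in `constReferences`/`constRefSize`, so the caller can pack them
// into the model's operandValues and memory pools afterwards.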
Subgraph createSubgraph(const TestSubgraph& testSubgraph, uint32_t* constCopySize,
                        std::vector<const TestBuffer*>* constCopies, uint32_t* constRefSize,
                        std::vector<const TestBuffer*>* constReferences) {
    CHECK(constCopySize != nullptr);
    CHECK(constCopies != nullptr);
    CHECK(constRefSize != nullptr);
    CHECK(constReferences != nullptr);

    // Operands.
    std::vector<Operand> operands(testSubgraph.operands.size());
    for (uint32_t i = 0; i < testSubgraph.operands.size(); i++) {
        const auto& op = testSubgraph.operands[i];

        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {
                    .poolIndex = 0,
                    .offset = *constCopySize,
                    .length = static_cast<int64_t>(op.data.size()),
            };
            constCopies->push_back(&op.data);
            *constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {
                    .poolIndex = 0,
                    .offset = *constRefSize,
                    .length = static_cast<int64_t>(op.data.size()),
            };
            constReferences->push_back(&op.data);
            *constRefSize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::SUBGRAPH) {
            loc = {
                    .poolIndex = 0,
                    .offset = *op.data.get<uint32_t>(),
                    .length = 0,
            };
        }

        std::optional<OperandExtraParams> extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            using Tag = OperandExtraParams::Tag;
            extraParams = OperandExtraParams::make<Tag::channelQuant>(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales,
                    .channelDim = static_cast<int32_t>(op.channelQuant.channelDim)});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = utils::toSigned(op.dimensions).value(),
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Operations.
    std::vector<Operation> operations(testSubgraph.operations.size());
    std::transform(testSubgraph.operations.begin(), testSubgraph.operations.end(),
                   operations.begin(), [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = utils::toSigned(op.inputs).value(),
                               .outputs = utils::toSigned(op.outputs).value()};
                   });

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = utils::toSigned(testSubgraph.inputIndexes).value(),
            .outputIndexes = utils::toSigned(testSubgraph.outputIndexes).value()};
}

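// Concatenates the given TestBuffers into `output`, advancing by each buffer's aligned size so
// that every buffer starts at an aligned offset.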
void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* output) {
    uint32_t offset = 0;
    for (const TestBuffer* buffer : buffers) {
        const uint8_t* begin = buffer->get<uint8_t>();
        const uint8_t* end = begin + buffer->size();
        std::copy(begin, end, output + offset);
        offset += buffer->alignedSize();
    }
}

}  // namespace

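// Blocks until the sync fence referred to by `syncFd` signals. Fails the test if the file
// descriptor is invalid or the wait returns an error.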
void waitForSyncFence(int syncFd) {
    constexpr int kInfiniteTimeout = -1;
    ASSERT_GT(syncFd, 0);
    int r = sync_wait(syncFd, kInfiniteTimeout);
    ASSERT_GE(r, 0);
}

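// Builds an AIDL Model from a TestModel: converts the main and referenced subgraphs, packs
// CONSTANT_COPY data into operandValues, and places CONSTANT_REFERENCE data into a shared
// memory pool.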
Model createModel(const TestModel& testModel) {
    uint32_t constCopySize = 0;
    uint32_t constRefSize = 0;
    std::vector<const TestBuffer*> constCopies;
    std::vector<const TestBuffer*> constReferences;

    Subgraph mainSubgraph = createSubgraph(testModel.main, &constCopySize, &constCopies,
                                           &constRefSize, &constReferences);
    std::vector<Subgraph> refSubgraphs(testModel.referenced.size());
    std::transform(testModel.referenced.begin(), testModel.referenced.end(), refSubgraphs.begin(),
                   [&constCopySize, &constCopies, &constRefSize,
                    &constReferences](const TestSubgraph& testSubgraph) {
                       return createSubgraph(testSubgraph, &constCopySize, &constCopies,
                                             &constRefSize, &constReferences);
                   });

    // Constant copies.
    std::vector<uint8_t> operandValues(constCopySize);
    copyTestBuffers(constCopies, operandValues.data());

    // Shared memory.
    std::vector<nn::SharedMemory> pools = {};
    if (constRefSize > 0) {
        const auto pool = nn::createSharedMemory(constRefSize).value();
        pools.push_back(pool);

        // load data
        const auto mappedMemory = nn::map(pool).value();
        uint8_t* mappedPtr = static_cast<uint8_t*>(std::get<void*>(mappedMemory.pointer));
        CHECK(mappedPtr != nullptr);

        copyTestBuffers(constReferences, mappedPtr);
    }

    std::vector<Memory> aidlPools;
    aidlPools.reserve(pools.size());
    for (auto& pool : pools) {
        auto aidlPool = utils::convert(pool).value();
        aidlPools.push_back(std::move(aidlPool));
    }

    return {.main = std::move(mainSubgraph),
            .referenced = std::move(refSubgraphs),
            .operandValues = std::move(operandValues),
            .pools = std::move(aidlPools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}

static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
    const auto byteSize = testModel.main.operands[testModel.main.outputIndexes[index]].data.size();
    return byteSize > 1u;
}

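// Shrinks the length of the given output by one byte and grows its padding by one byte, so that
// the driver must report OUTPUT_INSUFFICIENT_SIZE instead of silently writing into the padding.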
static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
    auto& loc = request->outputs[outputIndex].location;
    ASSERT_GT(loc.length, 1u);
    loc.length -= 1u;
    // Test that the padding is not used for output data.
    loc.padding += 1u;
}

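// Sets every output dimension of the model to 0 so that the output shapes must be determined by
// the driver at execution time.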
static void makeOutputDimensionsUnspecified(Model* model) {
    for (auto i : model->main.outputIndexes) {
        auto& dims = model->main.operands[i].dimensions;
        std::fill(dims.begin(), dims.end(), 0);
    }
}

// Manages the lifetime of memory resources used in an execution.
class ExecutionContext {
  public:
    ExecutionContext(std::shared_ptr<IDevice> device, std::shared_ptr<IPreparedModel> preparedModel)
        : kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}

    std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
    std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
                                             const Request& request) const;

  private:
    // Get a TestBuffer with data copied from an IBuffer object.
    void getBuffer(const std::shared_ptr<IBuffer>& buffer, size_t size,
                   TestBuffer* testBuffer) const;

    static constexpr uint32_t kInputPoolIndex = 0;
    static constexpr uint32_t kOutputPoolIndex = 1;
    static constexpr uint32_t kDeviceMemoryBeginIndex = 2;

    const std::shared_ptr<IDevice> kDevice;
    const std::shared_ptr<IPreparedModel> kPreparedModel;
    std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
    std::vector<std::shared_ptr<IBuffer>> mBuffers;
};

// Returns the number of bytes needed to round up "size" to the nearest multiple of "multiple".
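// For example, roundUpBytesNeeded(13, 8) == 3, since 13 + 3 = 16 is the next multiple of 8.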
static uint32_t roundUpBytesNeeded(uint32_t size, uint32_t multiple) {
    CHECK(multiple != 0);
    return ((size + multiple - 1) / multiple) * multiple - size;
}

std::optional<Request> ExecutionContext::createRequest(const TestModel& testModel,
                                                       MemoryType memoryType) {
    // Memory pools are organized as:
    // - 0: Input shared memory pool
    // - 1: Output shared memory pool
    // - [2, 2+i): Input device memories
    // - [2+i, 2+i+o): Output device memories
    DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
    std::vector<int32_t> tokens;
    mBuffers.clear();

    // Model inputs.
    std::vector<RequestArgument> inputs(testModel.main.inputIndexes.size());
    size_t inputSize = 0;
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
        if (op.data.size() == 0) {
            // Omitted input.
            inputs[i] = {.hasNoValue = true};
            continue;
        } else if (memoryType == MemoryType::DEVICE) {
            SCOPED_TRACE("Input index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<int32_t>(mBuffers.size() +
                                                                      kDeviceMemoryBeginIndex)};
                mBuffers.push_back(std::move(buffer));
                tokens.push_back(token);
                inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // Reserve shared memory for input.
        inputSize += roundUpBytesNeeded(inputSize, nn::kDefaultRequestMemoryAlignment);
        const auto padding = roundUpBytesNeeded(op.data.size(), nn::kDefaultRequestMemoryPadding);
        DataLocation loc = {.poolIndex = kInputPoolIndex,
                            .offset = static_cast<int64_t>(inputSize),
                            .length = static_cast<int64_t>(op.data.size()),
                            .padding = static_cast<int64_t>(padding)};
        inputSize += (op.data.size() + padding);
        inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Model outputs.
    std::vector<RequestArgument> outputs(testModel.main.outputIndexes.size());
    size_t outputSize = 0;
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
        if (memoryType == MemoryType::DEVICE) {
            SCOPED_TRACE("Output index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<int32_t>(mBuffers.size() +
                                                                      kDeviceMemoryBeginIndex)};
                mBuffers.push_back(std::move(buffer));
                tokens.push_back(token);
                outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the driver, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        size_t bufferSize = std::max<size_t>(op.data.size(), 1);

        // Reserve shared memory for output.
        outputSize += roundUpBytesNeeded(outputSize, nn::kDefaultRequestMemoryAlignment);
        const auto padding = roundUpBytesNeeded(bufferSize, nn::kDefaultRequestMemoryPadding);
        DataLocation loc = {.poolIndex = kOutputPoolIndex,
                            .offset = static_cast<int64_t>(outputSize),
                            .length = static_cast<int64_t>(bufferSize),
                            .padding = static_cast<int64_t>(padding)};
        outputSize += (bufferSize + padding);
        outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
        return std::nullopt;
    }

    // Memory pools.
    if (memoryType == MemoryType::BLOB_AHWB) {
        mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
        mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
    } else {
        mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1), /*aidlReadonly=*/true);
        mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1), /*aidlReadonly=*/false);
    }
    CHECK_NE(mInputMemory, nullptr);
    CHECK_NE(mOutputMemory, nullptr);
    std::vector<RequestMemoryPool> pools;
    pools.reserve(kDeviceMemoryBeginIndex + mBuffers.size());

    auto copiedInputMemory = utils::clone(*mInputMemory->getAidlMemory());
    CHECK(copiedInputMemory.has_value()) << copiedInputMemory.error().message;
    auto copiedOutputMemory = utils::clone(*mOutputMemory->getAidlMemory());
    CHECK(copiedOutputMemory.has_value()) << copiedOutputMemory.error().message;

    pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
            std::move(copiedInputMemory).value()));
    pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
            std::move(copiedOutputMemory).value()));
    for (const auto& token : tokens) {
        pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::token>(token));
    }

    // Copy input data to the input shared memory pool.
    uint8_t* inputPtr = mInputMemory->getPointer();
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
            const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, inputPtr + inputs[i].location.offset);
        }
    }
    return Request{
            .inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}

std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const TestModel& testModel,
                                                           const Request& request) const {
    // Copy out output results.
    uint8_t* outputPtr = mOutputMemory->getPointer();
    std::vector<TestBuffer> outputBuffers;
    for (uint32_t i = 0; i < request.outputs.size(); i++) {
        const auto& outputLoc = request.outputs[i].location;
        if (outputLoc.poolIndex == kOutputPoolIndex) {
            outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
        } else {
            const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
            if (op.data.size() == 0) {
                outputBuffers.emplace_back(0, nullptr);
            } else {
                SCOPED_TRACE("Output index = " + std::to_string(i));
                const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
                TestBuffer buffer;
                getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
                outputBuffers.push_back(std::move(buffer));
            }
        }
    }
    return outputBuffers;
}

// Get a TestBuffer with data copied from an IBuffer object.
void ExecutionContext::getBuffer(const std::shared_ptr<IBuffer>& buffer, size_t size,
                                 TestBuffer* testBuffer) const {
    // IBuffer -> Shared memory.
    auto sharedMemory = nn::createSharedMemory(size).value();
    auto aidlMemory = utils::convert(sharedMemory).value();
    const auto ret = buffer->copyTo(aidlMemory);
    ASSERT_TRUE(ret.isOk());

    // Shared memory -> TestBuffer.
    const auto outputMemory = nn::map(sharedMemory).value();
    const uint8_t* outputPtr = std::visit(
            [](auto* ptr) { return static_cast<const uint8_t*>(ptr); }, outputMemory.pointer);
    ASSERT_NE(outputPtr, nullptr);
    ASSERT_NE(testBuffer, nullptr);
    *testBuffer = TestBuffer(size, outputPtr);
}

static bool hasZeroSizedOutput(const TestModel& testModel) {
    return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() == 0;
                       });
}

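// Runs `testModel` on `preparedModel` under the given `testConfig`: builds a request, executes it
// with the configured executor, and validates the returned status, output shapes, timing, and
// output data. If `skipped` is non-null, it is set to true when the driver reports
// GENERAL_FAILURE for a test whose outputs are not fully specified.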
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                           const std::shared_ptr<IPreparedModel>& preparedModel,
                           const TestModel& testModel, const TestConfig& testConfig,
                           bool* skipped = nullptr) {
    if (skipped != nullptr) {
        *skipped = false;
    }
    // If output0 is not larger than one byte, we cannot test with an insufficient buffer.
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    ExecutionContext context(device, preparedModel);
    auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
    // Skip if testing memory domain but no device memory has been allocated.
    if (!maybeRequest.has_value()) {
        return;
    }

    Request request = std::move(maybeRequest).value();

    constexpr uint32_t kInsufficientOutputIndex = 0;
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(kInsufficientOutputIndex, &request);
    }

    int64_t loopTimeoutDurationNs = kOmittedTimeoutDuration;
    // OutputType::MISSED_DEADLINE is only used by
    // TestKind::INTINITE_LOOP_TIMEOUT tests to verify that an infinite loop is
    // aborted after a timeout.
    if (testConfig.outputType == OutputType::MISSED_DEADLINE) {
        // Override the default loop timeout duration with a small value to
        // speed up test execution.
        constexpr int64_t kMillisecond = 1'000'000;
        loopTimeoutDurationNs = 1 * kMillisecond;
    }

    std::shared_ptr<IExecution> execution;
    if (testConfig.reusable) {
        const auto ret = preparedModel->createReusableExecution(
                request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
        ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
        ASSERT_NE(nullptr, execution.get());
    }

    const auto executeAndCheckResults = [&preparedModel, &execution, &testConfig, &testModel,
                                         &context, &request, loopTimeoutDurationNs, skipped]() {
        ErrorStatus executionStatus;
        std::vector<OutputShape> outputShapes;
        Timing timing = kNoTiming;
        switch (testConfig.executor) {
            case Executor::SYNC: {
                SCOPED_TRACE("synchronous");

                ExecutionResult executionResult;
                // execute
                ::ndk::ScopedAStatus ret;
                if (testConfig.reusable) {
                    ret = execution->executeSynchronously(kNoDeadline, &executionResult);
                } else if (testConfig.useConfig) {
                    ret = preparedModel->executeSynchronouslyWithConfig(
                            request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
                            kNoDeadline, &executionResult);
                } else {
                    ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                              kNoDeadline, loopTimeoutDurationNs,
                                                              &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (ret.isOk()) {
                    executionStatus = executionResult.outputSufficientSize
                                              ? ErrorStatus::NONE
                                              : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                    outputShapes = std::move(executionResult.outputShapes);
                    timing = executionResult.timing;
                } else {
                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                }
                break;
            }
            case Executor::BURST: {
                SCOPED_TRACE("burst");

                // create burst
                std::shared_ptr<IBurst> burst;
                auto ret = preparedModel->configureExecutionBurst(&burst);
                ASSERT_TRUE(ret.isOk()) << ret.getDescription();
                ASSERT_NE(nullptr, burst.get());

                // associate a unique slot with each memory pool
                int64_t currentSlot = 0;
                std::vector<int64_t> slots;
                slots.reserve(request.pools.size());
                for (const auto& pool : request.pools) {
                    if (pool.getTag() == RequestMemoryPool::Tag::pool) {
                        slots.push_back(currentSlot++);
                    } else {
                        EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
                        slots.push_back(-1);
                    }
                }

                ExecutionResult executionResult;
                // execute
                if (testConfig.useConfig) {
                    ret = burst->executeSynchronouslyWithConfig(
                            request, slots,
                            {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
                            &executionResult);
                } else {
                    ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
                                                      kNoDeadline, loopTimeoutDurationNs,
                                                      &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (ret.isOk()) {
                    executionStatus = executionResult.outputSufficientSize
                                              ? ErrorStatus::NONE
                                              : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                    outputShapes = std::move(executionResult.outputShapes);
                    timing = executionResult.timing;
                } else {
                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                }

                // Mark each slot as unused after the execution. This is unnecessary because the
                // burst is freed after this scope ends, but this is here to test the functionality.
                for (int64_t slot : slots) {
                    ret = burst->releaseMemoryResource(slot);
                    ASSERT_TRUE(ret.isOk()) << ret.getDescription();
                }

                break;
            }
            case Executor::FENCED: {
                SCOPED_TRACE("fenced");
                ErrorStatus result = ErrorStatus::NONE;
                FencedExecutionResult executionResult;
                ::ndk::ScopedAStatus ret;
                if (testConfig.reusable) {
                    ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
                } else if (testConfig.useConfig) {
                    ret = preparedModel->executeFencedWithConfig(
                            request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
                            kNoDeadline, kNoDuration, &executionResult);
                } else {
                    ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                       kNoDeadline, loopTimeoutDurationNs,
                                                       kNoDuration, &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (!ret.isOk()) {
                    result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                    executionStatus = result;
                } else if (executionResult.syncFence.get() != -1) {
                    std::vector<ndk::ScopedFileDescriptor> waitFor;
                    auto dupFd = dup(executionResult.syncFence.get());
                    ASSERT_NE(dupFd, -1);
                    waitFor.emplace_back(dupFd);
                    // If a sync fence is returned, start another run that waits on the sync
                    // fence.
                    if (testConfig.reusable) {
                        ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
                                                       &executionResult);
                    } else if (testConfig.useConfig) {
                        ret = preparedModel->executeFencedWithConfig(
                                request, waitFor,
                                {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
                                kNoDeadline, kNoDuration, &executionResult);
                    } else {
                        ret = preparedModel->executeFenced(
                                request, waitFor, testConfig.measureTiming, kNoDeadline,
                                loopTimeoutDurationNs, kNoDuration, &executionResult);
                    }
                    ASSERT_TRUE(ret.isOk());
                    waitForSyncFence(executionResult.syncFence.get());
                }
                if (result == ErrorStatus::NONE) {
                    ASSERT_NE(executionResult.callback, nullptr);
                    Timing timingFenced;
                    auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
                                                                          &executionStatus);
                    ASSERT_TRUE(ret.isOk());
                }
                break;
            }
            default: {
                FAIL() << "Unsupported execution mode for AIDL interface.";
            }
        }

        if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
            executionStatus == ErrorStatus::GENERAL_FAILURE) {
            if (skipped != nullptr) {
                *skipped = true;
            }
            if (!testConfig.reportSkipping) {
                return;
            }
            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                         "execute model that it does not support.";
            std::cout << "[          ]   Early termination of test because vendor service cannot "
                         "execute model that it does not support."
                      << std::endl;
            GTEST_SKIP();
        }
        if (!testConfig.measureTiming) {
            EXPECT_EQ(timing, kNoTiming);
        } else {
            if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
                EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
            }
        }

        switch (testConfig.outputType) {
            case OutputType::FULLY_SPECIFIED:
                if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
                    // Executor::FENCED does not support zero-sized output.
                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
                    return;
                }
                // If the model output operands are fully specified, outputShapes must be
                // either empty, or have the same number of elements as the number of outputs.
                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
                ASSERT_TRUE(outputShapes.size() == 0 ||
                            outputShapes.size() == testModel.main.outputIndexes.size());
                break;
            case OutputType::UNSPECIFIED:
                if (testConfig.executor == Executor::FENCED) {
                    // For Executor::FENCED, the output shape must be fully specified.
                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
                    return;
                }
                // If the model output operands are not fully specified, outputShapes must have
                // the same number of elements as the number of outputs.
                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
                ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
                break;
            case OutputType::INSUFFICIENT:
                if (testConfig.executor == Executor::FENCED) {
                    // For Executor::FENCED, the output shape must be fully specified.
                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
                    return;
                }
                ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
                ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
                // Check that all returned output dimensions are at least as fully specified as the
                // union of the information about the corresponding operand in the model and in the
                // request. In this test, all model outputs have known rank with all dimensions
                // unspecified, and no dimensional information is provided in the request.
                for (uint32_t i = 0; i < outputShapes.size(); i++) {
                    ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
                    const auto& actual = outputShapes[i].dimensions;
                    const auto& golden =
                            testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
                    ASSERT_EQ(actual.size(), golden.size());
                    for (uint32_t j = 0; j < actual.size(); j++) {
                        if (actual[j] == 0) continue;
                        EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
                    }
                }
                return;
            case OutputType::MISSED_DEADLINE:
                ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                            executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
                        << "executionStatus = " << executionStatus;
                return;
        }

        // Go through all outputs, check returned output shapes.
        for (uint32_t i = 0; i < outputShapes.size(); i++) {
            EXPECT_TRUE(outputShapes[i].isSufficient);
            const auto& expect =
                    testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
            const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
            ASSERT_TRUE(unsignedActual.has_value());
            const std::vector<uint32_t>& actual = unsignedActual.value();
            EXPECT_EQ(expect, actual);
        }

        // Retrieve execution results.
        const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);

        // We want "close-enough" results.
        checkResults(testModel, outputs);
    };

    executeAndCheckResults();

    // For reusable execution tests, run the execution twice.
    if (testConfig.reusable) {
        SCOPED_TRACE("Second execution");
        executeAndCheckResults();
    }
}

void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                           const std::shared_ptr<IPreparedModel>& preparedModel,
                           const TestModel& testModel, TestKind testKind) {
    std::vector<OutputType> outputTypesList;
    std::vector<bool> measureTimingList;
    std::vector<Executor> executorList;
    std::vector<MemoryType> memoryTypeList;
    std::vector<bool> reusableList = {false};
    std::vector<bool> useConfigList = {false};

    int deviceVersion;
    ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
    if (deviceVersion >= kMinAidlLevelForFL8) {
        reusableList.push_back(true);
        useConfigList.push_back(true);
    }

    switch (testKind) {
        case TestKind::GENERAL: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::BURST};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::DYNAMIC_SHAPE: {
            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::MEMORY_DOMAIN: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
        } break;
        case TestKind::FENCED_COMPUTE: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false, true};
            executorList = {Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
            return;
        } break;
        case TestKind::INTINITE_LOOP_TIMEOUT: {
            outputTypesList = {OutputType::MISSED_DEADLINE};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
    }

    for (const OutputType outputType : outputTypesList) {
        for (const bool measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                for (const MemoryType memoryType : memoryTypeList) {
                    for (const bool reusable : reusableList) {
                        for (const bool useConfig : useConfigList) {
                            if ((useConfig || executor == Executor::BURST) && reusable) continue;
                            const TestConfig testConfig(executor, measureTiming, outputType,
                                                        memoryType, reusable,
                                                        /*reportSkipping=*/true, useConfig);
                            SCOPED_TRACE(toString(testConfig));
                            EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
                        }
                    }
                }
            }
        }
    }
}

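// Runs identical test configurations against a model with unsigned quantized operands and its
// signed quantized counterpart, and requires that the vendor service either executes both models
// or skips both of them.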
void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
                                   const std::shared_ptr<IPreparedModel>& preparedModel,
                                   const TestModel& testModel,
                                   const std::shared_ptr<IPreparedModel>& preparedCoupledModel,
                                   const TestModel& coupledModel) {
    const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
    const std::vector<bool> measureTimingList = {false, true};
    const std::vector<Executor> executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};

    for (const OutputType outputType : outputTypesList) {
        for (const bool measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
                                            /*reusable=*/false, /*reportSkipping=*/false);
                bool baseSkipped = false;
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
                bool coupledSkipped = false;
                EvaluatePreparedModel(device, preparedCoupledModel, coupledModel, testConfig,
                                      &coupledSkipped);
                ASSERT_EQ(baseSkipped, coupledSkipped);
                if (baseSkipped) {
                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                                 "execute model that it does not support.";
                    std::cout << "[          ]   Early termination of test because vendor service "
                                 "cannot "
                                 "execute model that it does not support."
                              << std::endl;
                    GTEST_SKIP();
                }
            }
        }
    }
}

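// Entry point for the generated tests: builds the AIDL model from `testModel`, prepares it on
// `device`, and evaluates it according to `testKind`.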
void Execute(const std::shared_ptr<IDevice>& device, const TestModel& testModel,
             TestKind testKind) {
    Model model = createModel(testModel);
    if (testKind == TestKind::DYNAMIC_SHAPE) {
        makeOutputDimensionsUnspecified(&model);
    }

    std::shared_ptr<IPreparedModel> preparedModel;
    switch (testKind) {
        case TestKind::GENERAL:
        case TestKind::DYNAMIC_SHAPE:
        case TestKind::MEMORY_DOMAIN:
        case TestKind::FENCED_COMPUTE:
        case TestKind::INTINITE_LOOP_TIMEOUT: {
            createPreparedModel(device, model, &preparedModel);
            if (preparedModel == nullptr) return;
            EvaluatePreparedModel(device, preparedModel, testModel, testKind);
            int32_t deviceVersion;
            ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
            if (deviceVersion >= kMinAidlLevelForFL8) {
                createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ true,
                                    /*useConfig*/ true);
                EvaluatePreparedModel(device, preparedModel, testModel, testKind);
            }
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
            createPreparedModel(device, model, &preparedModel,
                                /*reportSkipping*/ false);
            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
            std::shared_ptr<IPreparedModel> preparedCoupledModel;
            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
                                /*reportSkipping*/ false);
            // If we couldn't prepare a model with unsigned quantization, we must
            // fail to prepare a model with signed quantization as well.
            if (preparedModel == nullptr) {
                ASSERT_EQ(preparedCoupledModel, nullptr);
                // If we failed to prepare both of the models, we can safely skip
                // the test.
                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                             "prepare model that it does not support.";
                std::cout
                        << "[          ]   Early termination of test because vendor service cannot "
                           "prepare model that it does not support."
                        << std::endl;
                GTEST_SKIP();
            }
            ASSERT_NE(preparedCoupledModel, nullptr);
            EvaluatePreparedCoupledModels(device, preparedModel, testModel, preparedCoupledModel,
                                          signedQuantizedModel);
        } break;
    }
}

void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
    const bool deviceIsResponsive =
            ndk::ScopedAStatus::fromStatus(AIBinder_ping(kDevice->asBinder().get())).isOk();
    ASSERT_TRUE(deviceIsResponsive);
    // TODO(b/201260787): We should require old drivers to report the model as
    // unsupported instead of simply skipping the test.
    SkipIfDriverOlderThanTestModel();
}

void GeneratedTestBase::SkipIfDriverOlderThanTestModel() {
    int32_t deviceVersion;
    ASSERT_TRUE(kDevice->getInterfaceVersion(&deviceVersion).isOk());
    const int32_t modelVersion = kTestModel.getAidlVersionInt();
    if (deviceVersion < modelVersion) {
        GTEST_SKIP() << "Device interface version " << deviceVersion
                     << " is older than test model's minimum supported HAL version " << modelVersion
                     << ". Skipping test.";
    }
}

std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
    const auto& [namedDevice, namedModel] = info.param;
    return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
}

// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};

// Tag for the memory domain tests
class MemoryDomainTest : public GeneratedTest {};

// Tag for the fenced compute tests
class FencedComputeTest : public GeneratedTest {};

// Tag for the quantization coupling tests
class QuantizationCouplingTest : public GeneratedTest {};

// Tag for the loop timeout tests
class InfiniteLoopTimeoutTest : public GeneratedTest {};

TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, TestKind::GENERAL);
}

TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, TestKind::DYNAMIC_SHAPE);
}

TEST_P(MemoryDomainTest, Test) {
    Execute(kDevice, kTestModel, TestKind::MEMORY_DOMAIN);
}

TEST_P(FencedComputeTest, Test) {
    Execute(kDevice, kTestModel, TestKind::FENCED_COMPUTE);
}

TEST_P(QuantizationCouplingTest, Test) {
    Execute(kDevice, kTestModel, TestKind::QUANTIZATION_COUPLING);
}

TEST_P(InfiniteLoopTimeoutTest, Test) {
    Execute(kDevice, kTestModel, TestKind::INTINITE_LOOP_TIMEOUT);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(FencedComputeTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.hasQuant8CoupledOperands() &&
           testModel.main.operations.size() == 1;
});

INSTANTIATE_GENERATED_TEST(InfiniteLoopTimeoutTest, [](const TestModel& testModel) {
    return testModel.isInfiniteLoopTimeoutTest();
});

}  // namespace aidl::android::hardware::neuralnetworks::vts::functional