blob: 3e947f51631b44ac5316cfc693d4fcdf278b7a43 [file] [log] [blame]
Lev Proleev13fdfcd2019-08-30 11:35:34 +01001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "GeneratedTestHarness.h"
18
19#include <android-base/logging.h>
20#include <android/hardware/neuralnetworks/1.0/IDevice.h>
21#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
22#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
23#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
24#include <android/hardware/neuralnetworks/1.0/types.h>
25#include <android/hardware/neuralnetworks/1.1/IDevice.h>
26#include <android/hardware/neuralnetworks/1.2/IDevice.h>
27#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
28#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
29#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
Lev Proleev26d1bc82019-08-30 11:57:18 +010030#include <android/hardware/neuralnetworks/1.2/types.h>
31#include <android/hardware/neuralnetworks/1.3/IDevice.h>
Xusong Wang1b3f4262019-10-25 12:07:17 -070032#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
Xusong Wangcc47dff2019-10-23 10:35:07 -070033#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
Lev Proleev26d1bc82019-08-30 11:57:18 +010034#include <android/hardware/neuralnetworks/1.3/types.h>
Lev Proleev13fdfcd2019-08-30 11:35:34 +010035#include <android/hidl/allocator/1.0/IAllocator.h>
36#include <android/hidl/memory/1.0/IMemory.h>
37#include <hidlmemory/mapping.h>
38
#include <gtest/gtest.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <vector>
44
45#include "1.0/Utils.h"
46#include "1.2/Callbacks.h"
Xusong Wangcc47dff2019-10-23 10:35:07 -070047#include "1.3/Callbacks.h"
Lev Proleev13fdfcd2019-08-30 11:35:34 +010048#include "ExecutionBurstController.h"
49#include "MemoryUtils.h"
50#include "TestHarness.h"
51#include "Utils.h"
52#include "VtsHalNeuralnetworks.h"
53
Lev Proleev26d1bc82019-08-30 11:57:18 +010054namespace android::hardware::neuralnetworks::V1_3::vts::functional {
Lev Proleev13fdfcd2019-08-30 11:35:34 +010055
56using namespace test_helper;
57using hidl::memory::V1_0::IMemory;
Xusong Wangcc47dff2019-10-23 10:35:07 -070058using implementation::PreparedModelCallback;
Lev Proleev13fdfcd2019-08-30 11:35:34 +010059using V1_0::DataLocation;
60using V1_0::ErrorStatus;
61using V1_0::OperandLifeTime;
62using V1_0::Request;
63using V1_1::ExecutionPreference;
Lev Proleev26d1bc82019-08-30 11:57:18 +010064using V1_2::Constant;
Lev Proleev26d1bc82019-08-30 11:57:18 +010065using V1_2::MeasureTiming;
Lev Proleev26d1bc82019-08-30 11:57:18 +010066using V1_2::OutputShape;
67using V1_2::SymmPerChannelQuantParams;
68using V1_2::Timing;
69using V1_2::implementation::ExecutionCallback;
Lev Proleev13fdfcd2019-08-30 11:35:34 +010070using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
71
Lev Proleev0d4ba3f2019-10-02 17:32:06 +010072namespace {
73
74enum class Executor { ASYNC, SYNC, BURST };
75
Lev Proleev13fdfcd2019-08-30 11:35:34 +010076enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
77
Lev Proleev0d4ba3f2019-10-02 17:32:06 +010078struct TestConfig {
79 Executor executor;
80 MeasureTiming measureTiming;
81 OutputType outputType;
Lev Proleev9226c1e2019-10-03 14:43:18 +010082 // `reportSkipping` indicates if a test should print an info message in case
83 // it is skipped. The field is set to true by default and is set to false in
84 // quantization coupling tests to suppress skipping a test
85 bool reportSkipping;
86 TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType)
87 : executor(executor),
88 measureTiming(measureTiming),
89 outputType(outputType),
90 reportSkipping(true) {}
91 TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
92 bool reportSkipping)
93 : executor(executor),
94 measureTiming(measureTiming),
95 outputType(outputType),
96 reportSkipping(reportSkipping) {}
Lev Proleev0d4ba3f2019-10-02 17:32:06 +010097};
98
99} // namespace
100
// Converts a test_helper::TestModel into a V1_3 HIDL Model. Constant operand
// data is packed into two pools: CONSTANT_COPY data goes into the inline
// `operandValues` blob, and CONSTANT_REFERENCE data goes into a single shared
// memory region (pools[0]).
Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    // Running byte sizes of the two constant pools; each also serves as the
    // next operand's offset within its pool.
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        // Only constant operands get a data location; all others keep a
        // zero-initialized DataLocation.
        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            // alignedSize() keeps the next operand's offset suitably aligned
            // even though `length` is the unpadded size.
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        // Per-channel quantization parameters apply only to this operand type.
        Operand::ExtraParams extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            extraParams.channelQuant(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies: pack CONSTANT_COPY operand data at the offsets computed
    // in the first pass above.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory: one pool holds all CONSTANT_REFERENCE operands; map it
    // and copy their data in at the offsets computed above.
    hidl_vec<hidl_memory> pools = {};
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}
188
189static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
190 const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
191 return byteSize > 1u;
192}
193
194static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
195 auto& length = request->outputs[outputIndex].location.length;
196 ASSERT_GT(length, 1u);
197 length -= 1u;
198}
199
200static void makeOutputDimensionsUnspecified(Model* model) {
201 for (auto i : model->outputIndexes) {
202 auto& dims = model->operands[i].dimensions;
203 std::fill(dims.begin(), dims.end(), 0);
204 }
205}
206
// Asynchronous execution path: launches an execute_1_3 call on the driver and
// returns the launch status; completion is delivered through `callback`.
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_3(request, measure, callback);
}
212static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
213 const Request& request, MeasureTiming measure,
214 hidl_vec<OutputShape>* outputShapes,
215 Timing* timing) {
216 ErrorStatus result;
217 Return<void> ret = preparedModel->executeSynchronously(
218 request, measure,
219 [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
220 const Timing& time) {
221 result = error;
222 *outputShapes = shapes;
223 *timing = time;
224 });
225 if (!ret.isOk()) {
226 return ErrorStatus::GENERAL_FAILURE;
227 }
228 return result;
229}
230static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
231 const sp<IPreparedModel>& preparedModel) {
Michael Butler648ada52019-07-25 17:22:11 -0700232 return android::nn::ExecutionBurstController::create(preparedModel,
233 std::chrono::microseconds{0});
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100234}
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100235
// Runs one execution of `testModel` on `preparedModel` per `testConfig` and
// validates status, output shapes, timing, and (for successful runs) the
// output values. If `skipped` is non-null it is set to whether the execution
// was skipped because the driver cannot execute the (dynamic-shape) model.
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           const TestConfig& testConfig, bool* skipped = nullptr) {
    if (skipped != nullptr) {
        *skipped = false;
    }
    // If output0 does not have size larger than one byte, we can not test with insufficient buffer.
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    Request request = createRequest(testModel);
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    // Dispatch through the selected execution path; all three paths fill in
    // executionStatus, outputShapes, and timing.
    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (testConfig.executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

            // retrieve execution status
            executionCallback->wait();
            executionStatus = executionCallback->getStatus();
            outputShapes = executionCallback->getOutputShapes();
            timing = executionCallback->getTiming();

            break;
        }
        case Executor::SYNC: {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

            break;
        }
        case Executor::BURST: {
            SCOPED_TRACE("burst");

            // create burst
            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
                    CreateBurst(preparedModel);
            ASSERT_NE(nullptr, controller.get());

            // create memory keys: the pool addresses identify memories in the
            // burst's cache.
            std::vector<intptr_t> keys(request.pools.size());
            for (size_t i = 0; i < keys.size(); ++i) {
                keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
            }

            // execute burst
            int n;
            std::tie(n, outputShapes, timing, std::ignore) =
                    controller->compute(request, testConfig.measureTiming, keys);
            executionStatus = nn::convertResultCodeToErrorStatus(n);

            break;
        }
    }

    // A driver may legitimately fail dynamic-shape executions it does not
    // support; record the skip and bail out (silently if requested).
    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        if (skipped != nullptr) {
            *skipped = true;
        }
        if (!testConfig.reportSkipping) {
            return;
        }
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "execute model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }
    // With measurement off both fields must be the "no timing" sentinel; with
    // it on, device time cannot exceed driver time when both were measured.
    if (testConfig.measureTiming == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }

    switch (testConfig.outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be
            // either empty, or have the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_TRUE(outputShapes.size() == 0 ||
                        outputShapes.size() == testModel.outputIndexes.size());
            break;
        case OutputType::UNSPECIFIED:
            // If the model output operands are not fully specified, outputShapes must have
            // the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            break;
        case OutputType::INSUFFICIENT:
            // Output values are unusable after an insufficient-size failure;
            // only the reported shapes are checked, then we return early.
            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            ASSERT_FALSE(outputShapes[0].isSufficient);
            return;
    }

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(request);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}
368
369void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
Lev Proleev9226c1e2019-10-03 14:43:18 +0100370 TestKind testKind) {
Lev Proleev0d4ba3f2019-10-02 17:32:06 +0100371 std::initializer_list<OutputType> outputTypesList;
372 std::initializer_list<MeasureTiming> measureTimingList;
373 std::initializer_list<Executor> executorList;
374
Lev Proleev9226c1e2019-10-03 14:43:18 +0100375 switch (testKind) {
376 case TestKind::GENERAL: {
377 outputTypesList = {OutputType::FULLY_SPECIFIED};
378 measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
379 executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
380 } break;
381 case TestKind::DYNAMIC_SHAPE: {
382 outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
383 measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
384 executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
385 } break;
386 case TestKind::QUANTIZATION_COUPLING: {
387 LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
388 return;
389 } break;
Lev Proleev0d4ba3f2019-10-02 17:32:06 +0100390 }
391
392 for (const OutputType outputType : outputTypesList) {
393 for (const MeasureTiming measureTiming : measureTimingList) {
394 for (const Executor executor : executorList) {
Lev Proleev9226c1e2019-10-03 14:43:18 +0100395 const TestConfig testConfig(executor, measureTiming, outputType);
Lev Proleev0d4ba3f2019-10-02 17:32:06 +0100396 EvaluatePreparedModel(preparedModel, testModel, testConfig);
397 }
398 }
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100399 }
400}
401
// Runs the same execution sweep over a base model and its quantization-coupled
// counterpart, and requires the two drivers' skip decisions to agree: a driver
// that executes one of the coupled models must execute the other as well.
void EvaluatePreparedCoupledModels(const sp<IPreparedModel>& preparedModel,
                                   const TestModel& testModel,
                                   const sp<IPreparedModel>& preparedCoupledModel,
                                   const TestModel& coupledModel) {
    // Coupling only checks fully specified outputs, across all timing modes
    // and executors. (Initialized at declaration, so the initializer_list
    // backing arrays live as long as the variables.)
    std::initializer_list<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
    std::initializer_list<MeasureTiming> measureTimingList = {MeasureTiming::NO,
                                                              MeasureTiming::YES};
    std::initializer_list<Executor> executorList = {Executor::ASYNC, Executor::SYNC,
                                                    Executor::BURST};

    for (const OutputType outputType : outputTypesList) {
        for (const MeasureTiming measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                // reportSkipping=false: skipping is detected via the out-param
                // and reported once here, not inside EvaluatePreparedModel.
                const TestConfig testConfig(executor, measureTiming, outputType,
                                            /*reportSkipping=*/false);
                bool baseSkipped = false;
                EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
                bool coupledSkipped = false;
                EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig,
                                      &coupledSkipped);
                ASSERT_EQ(baseSkipped, coupledSkipped);
                if (baseSkipped) {
                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                                 "execute model that it does not support.";
                    std::cout << "[ ] Early termination of test because vendor service "
                                 "cannot "
                                 "execute model that it does not support."
                              << std::endl;
                    GTEST_SKIP();
                }
            }
        }
    }
}
436
// Entry point for a generated test: builds the HIDL model from `testModel`,
// prepares it on `device`, and evaluates it according to `testKind`.
void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
    Model model = createModel(testModel);
    // Dynamic-shape tests run the same model with its output dims zeroed out.
    if (testKind == TestKind::DYNAMIC_SHAPE) {
        makeOutputDimensionsUnspecified(&model);
    }

    sp<IPreparedModel> preparedModel;
    switch (testKind) {
        case TestKind::GENERAL: {
            createPreparedModel(device, model, &preparedModel);
            if (preparedModel == nullptr) return;
            EvaluatePreparedModel(preparedModel, testModel, TestKind::GENERAL);
        } break;
        case TestKind::DYNAMIC_SHAPE: {
            createPreparedModel(device, model, &preparedModel);
            if (preparedModel == nullptr) return;
            EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            // Prepare both the unsigned-quantized model and its signed twin;
            // skipping is reported manually below, hence reportSkipping=false.
            ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
            createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
            sp<IPreparedModel> preparedCoupledModel;
            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
                                /*reportSkipping*/ false);
            // If we couldn't prepare a model with unsigned quantization, we must
            // fail to prepare a model with signed quantization as well.
            if (preparedModel == nullptr) {
                ASSERT_EQ(preparedCoupledModel, nullptr);
                // If we failed to prepare both of the models, we can safely skip
                // the test.
                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                             "prepare model that it does not support.";
                std::cout
                        << "[ ] Early termination of test because vendor service cannot "
                           "prepare model that it does not support."
                        << std::endl;
                GTEST_SKIP();
            }
            ASSERT_NE(preparedCoupledModel, nullptr);
            EvaluatePreparedCoupledModels(preparedModel, testModel, preparedCoupledModel,
                                          signedQuantizedModel);
        } break;
    }
}
482
// Per-test setup: runs the parameterized-test base setup, then requires that
// the device under test was successfully obtained.
void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
}
487
// Returns every registered test model accepted by `filter`, paired with its
// name, for test instantiation.
std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}
491
492std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
493 const auto& [namedDevice, namedModel] = info.param;
494 return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
495}
496
// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};

// Tag for the quantization coupling tests; the DISABLED_ prefix keeps them
// from running by default, per the gtest naming convention.
class DISABLED_QuantizationCouplingTest : public GeneratedTest {};
505
// Runs each generated model with fully specified outputs across all executors.
TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
}

// Runs each generated model with its output dimensions zeroed out, including
// the deliberately undersized output-buffer case.
TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
}

// Runs each eligible quant8-asymm model against its signed-quantized twin.
TEST_P(DISABLED_QuantizationCouplingTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
}
517
// Instantiate the general tests for every model that is expected to succeed.
INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

// Instantiate the dynamic-shape tests for the same set of models.
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

// Quantization coupling applies only to single-operation models that contain
// quant8 asymmetric operands.
INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
    return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
});
527
Lev Proleev26d1bc82019-08-30 11:57:18 +0100528} // namespace android::hardware::neuralnetworks::V1_3::vts::functional