blob: 2ec29887dfe582796362695dff5fb9a8e973d03e [file] [log] [blame]
Lev Proleev13fdfcd2019-08-30 11:35:34 +01001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "GeneratedTestHarness.h"
18
19#include <android-base/logging.h>
20#include <android/hardware/neuralnetworks/1.0/IDevice.h>
21#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
22#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
23#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
24#include <android/hardware/neuralnetworks/1.0/types.h>
25#include <android/hardware/neuralnetworks/1.1/IDevice.h>
26#include <android/hardware/neuralnetworks/1.2/IDevice.h>
27#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
28#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
29#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
Lev Proleev26d1bc82019-08-30 11:57:18 +010030#include <android/hardware/neuralnetworks/1.2/types.h>
31#include <android/hardware/neuralnetworks/1.3/IDevice.h>
Xusong Wang1b3f4262019-10-25 12:07:17 -070032#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
Xusong Wangcc47dff2019-10-23 10:35:07 -070033#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
Lev Proleev26d1bc82019-08-30 11:57:18 +010034#include <android/hardware/neuralnetworks/1.3/types.h>
Lev Proleev13fdfcd2019-08-30 11:35:34 +010035#include <android/hidl/allocator/1.0/IAllocator.h>
36#include <android/hidl/memory/1.0/IMemory.h>
37#include <hidlmemory/mapping.h>
38
#include <gtest/gtest.h>
#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <vector>
44
45#include "1.0/Utils.h"
46#include "1.2/Callbacks.h"
Xusong Wangcc47dff2019-10-23 10:35:07 -070047#include "1.3/Callbacks.h"
Lev Proleev13fdfcd2019-08-30 11:35:34 +010048#include "ExecutionBurstController.h"
49#include "MemoryUtils.h"
50#include "TestHarness.h"
51#include "Utils.h"
52#include "VtsHalNeuralnetworks.h"
53
Lev Proleev26d1bc82019-08-30 11:57:18 +010054namespace android::hardware::neuralnetworks::V1_3::vts::functional {
Lev Proleev13fdfcd2019-08-30 11:35:34 +010055
56using namespace test_helper;
57using hidl::memory::V1_0::IMemory;
Xusong Wangcc47dff2019-10-23 10:35:07 -070058using implementation::PreparedModelCallback;
Lev Proleev13fdfcd2019-08-30 11:35:34 +010059using V1_0::DataLocation;
60using V1_0::ErrorStatus;
61using V1_0::OperandLifeTime;
62using V1_0::Request;
63using V1_1::ExecutionPreference;
Lev Proleev26d1bc82019-08-30 11:57:18 +010064using V1_2::Constant;
Lev Proleev26d1bc82019-08-30 11:57:18 +010065using V1_2::MeasureTiming;
Lev Proleev26d1bc82019-08-30 11:57:18 +010066using V1_2::OutputShape;
67using V1_2::SymmPerChannelQuantParams;
68using V1_2::Timing;
69using V1_2::implementation::ExecutionCallback;
Lev Proleev13fdfcd2019-08-30 11:35:34 +010070using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
71
namespace {

// How an execution is dispatched to the driver: asynchronous callback,
// synchronous call, or via an ExecutionBurstController.
enum class Executor { ASYNC, SYNC, BURST };

// FULLY_SPECIFIED: output dimensions are known up front.
// UNSPECIFIED: output dimensions are zeroed so the driver must report them.
// INSUFFICIENT: output buffer 0 is deliberately one byte too small.
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };

// One point in the test matrix exercised by EvaluatePreparedModel.
struct TestConfig {
    Executor executor;
    MeasureTiming measureTiming;
    OutputType outputType;
};

}  // namespace
85
Lev Proleev13fdfcd2019-08-30 11:35:34 +010086Model createModel(const TestModel& testModel) {
87 // Model operands.
88 hidl_vec<Operand> operands(testModel.operands.size());
89 size_t constCopySize = 0, constRefSize = 0;
90 for (uint32_t i = 0; i < testModel.operands.size(); i++) {
91 const auto& op = testModel.operands[i];
92
93 DataLocation loc = {};
94 if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
95 loc = {.poolIndex = 0,
96 .offset = static_cast<uint32_t>(constCopySize),
97 .length = static_cast<uint32_t>(op.data.size())};
98 constCopySize += op.data.alignedSize();
99 } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
100 loc = {.poolIndex = 0,
101 .offset = static_cast<uint32_t>(constRefSize),
102 .length = static_cast<uint32_t>(op.data.size())};
103 constRefSize += op.data.alignedSize();
104 }
105
106 Operand::ExtraParams extraParams;
107 if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
108 extraParams.channelQuant(SymmPerChannelQuantParams{
109 .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
110 }
111
112 operands[i] = {.type = static_cast<OperandType>(op.type),
113 .dimensions = op.dimensions,
114 .numberOfConsumers = op.numberOfConsumers,
115 .scale = op.scale,
116 .zeroPoint = op.zeroPoint,
117 .lifetime = static_cast<OperandLifeTime>(op.lifetime),
118 .location = loc,
119 .extraParams = std::move(extraParams)};
120 }
121
122 // Model operations.
123 hidl_vec<Operation> operations(testModel.operations.size());
124 std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
125 [](const TestOperation& op) -> Operation {
126 return {.type = static_cast<OperationType>(op.type),
127 .inputs = op.inputs,
128 .outputs = op.outputs};
129 });
130
131 // Constant copies.
132 hidl_vec<uint8_t> operandValues(constCopySize);
133 for (uint32_t i = 0; i < testModel.operands.size(); i++) {
134 const auto& op = testModel.operands[i];
135 if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
136 const uint8_t* begin = op.data.get<uint8_t>();
137 const uint8_t* end = begin + op.data.size();
138 std::copy(begin, end, operandValues.data() + operands[i].location.offset);
139 }
140 }
141
142 // Shared memory.
143 hidl_vec<hidl_memory> pools = {};
144 if (constRefSize > 0) {
145 hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
146 CHECK_NE(pools[0].size(), 0u);
147
148 // load data
149 sp<IMemory> mappedMemory = mapMemory(pools[0]);
150 CHECK(mappedMemory.get() != nullptr);
151 uint8_t* mappedPtr =
152 reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
153 CHECK(mappedPtr != nullptr);
154
155 for (uint32_t i = 0; i < testModel.operands.size(); i++) {
156 const auto& op = testModel.operands[i];
157 if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
158 const uint8_t* begin = op.data.get<uint8_t>();
159 const uint8_t* end = begin + op.data.size();
160 std::copy(begin, end, mappedPtr + operands[i].location.offset);
161 }
162 }
163 }
164
165 return {.operands = std::move(operands),
166 .operations = std::move(operations),
167 .inputIndexes = testModel.inputIndexes,
168 .outputIndexes = testModel.outputIndexes,
169 .operandValues = std::move(operandValues),
170 .pools = std::move(pools),
171 .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
172}
173
174static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
175 const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
176 return byteSize > 1u;
177}
178
179static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
180 auto& length = request->outputs[outputIndex].location.length;
181 ASSERT_GT(length, 1u);
182 length -= 1u;
183}
184
185static void makeOutputDimensionsUnspecified(Model* model) {
186 for (auto i : model->outputIndexes) {
187 auto& dims = model->operands[i].dimensions;
188 std::fill(dims.begin(), dims.end(), 0);
189 }
190}
191
// Asynchronous execution path: dispatches |request| through
// IPreparedModel::execute_1_3; results are later retrieved from |callback|.
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_3(request, measure, callback);
}
197static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
198 const Request& request, MeasureTiming measure,
199 hidl_vec<OutputShape>* outputShapes,
200 Timing* timing) {
201 ErrorStatus result;
202 Return<void> ret = preparedModel->executeSynchronously(
203 request, measure,
204 [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
205 const Timing& time) {
206 result = error;
207 *outputShapes = shapes;
208 *timing = time;
209 });
210 if (!ret.isOk()) {
211 return ErrorStatus::GENERAL_FAILURE;
212 }
213 return result;
214}
// Creates an ExecutionBurstController for |preparedModel|.
// NOTE(review): the zero-microsecond polling duration presumably makes the
// burst client block on the FMQ instead of spin-polling — confirm against
// ExecutionBurstController::create.
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<IPreparedModel>& preparedModel) {
    return android::nn::ExecutionBurstController::create(preparedModel,
                                                         std::chrono::microseconds{0});
}
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100220
// Runs a single execution of |preparedModel| for |testModel| according to
// |testConfig| (executor kind, timing measurement, output-shape mode) and
// validates the reported status, output shapes, timing, and output values.
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           const TestConfig& testConfig) {
    // If output0 does not have size larger than one byte, we can not test with insufficient buffer.
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    Request request = createRequest(testModel);
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        // Shrink output0 by one byte so the driver is forced to report an
        // insufficiently sized output buffer.
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    // Dispatch through the configured path. All three paths populate the
    // same three results used by the checks below.
    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (testConfig.executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

            // retrieve execution status
            executionCallback->wait();
            executionStatus = executionCallback->getStatus();
            outputShapes = executionCallback->getOutputShapes();
            timing = executionCallback->getTiming();

            break;
        }
        case Executor::SYNC: {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

            break;
        }
        case Executor::BURST: {
            SCOPED_TRACE("burst");

            // create burst
            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
                    CreateBurst(preparedModel);
            ASSERT_NE(nullptr, controller.get());

            // create memory keys
            // NOTE(review): each pool's address is used as its key —
            // presumably an identity key for the burst's memory cache;
            // verify against ExecutionBurstController::compute.
            std::vector<intptr_t> keys(request.pools.size());
            for (size_t i = 0; i < keys.size(); ++i) {
                keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
            }

            // execute burst
            int n;
            std::tie(n, outputShapes, timing, std::ignore) =
                    controller->compute(request, testConfig.measureTiming, keys);
            executionStatus = nn::convertResultCodeToErrorStatus(n);

            break;
        }
    }

    // A GENERAL_FAILURE on a model with non-fully-specified outputs is
    // treated as "driver does not support dynamic output shapes": skip
    // rather than fail.
    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "execute model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }

    // With timing disabled both fields must be UINT64_MAX; with timing
    // enabled, device time must not exceed driver time when both are valid.
    if (testConfig.measureTiming == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }

    switch (testConfig.outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be either
            // either empty, or have the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_TRUE(outputShapes.size() == 0 ||
                        outputShapes.size() == testModel.outputIndexes.size());
            break;
        case OutputType::UNSPECIFIED:
            // If the model output operands are not fully specified, outputShapes must have
            // the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            break;
        case OutputType::INSUFFICIENT:
            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            ASSERT_FALSE(outputShapes[0].isSufficient);
            // output0 was deliberately made too small, so skip the
            // shape/value checks below.
            return;
    }

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(request);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}
344
345void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
346 bool testDynamicOutputShape) {
Lev Proleev0d4ba3f2019-10-02 17:32:06 +0100347 std::initializer_list<OutputType> outputTypesList;
348 std::initializer_list<MeasureTiming> measureTimingList;
349 std::initializer_list<Executor> executorList;
350
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100351 if (testDynamicOutputShape) {
Lev Proleev0d4ba3f2019-10-02 17:32:06 +0100352 outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
353 measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
354 executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100355 } else {
Lev Proleev0d4ba3f2019-10-02 17:32:06 +0100356 outputTypesList = {OutputType::FULLY_SPECIFIED};
357 measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
358 executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
359 }
360
361 for (const OutputType outputType : outputTypesList) {
362 for (const MeasureTiming measureTiming : measureTimingList) {
363 for (const Executor executor : executorList) {
364 const TestConfig testConfig = {.executor = executor,
365 .measureTiming = measureTiming,
366 .outputType = outputType};
367 EvaluatePreparedModel(preparedModel, testModel, testConfig);
368 }
369 }
Lev Proleev13fdfcd2019-08-30 11:35:34 +0100370 }
371}
372
373void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
374 Model model = createModel(testModel);
375 if (testDynamicOutputShape) {
376 makeOutputDimensionsUnspecified(&model);
377 }
378
379 sp<IPreparedModel> preparedModel;
380 createPreparedModel(device, model, &preparedModel);
381 if (preparedModel == nullptr) return;
382
383 EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
384}
385
// Per-test setup: runs the parameterized-test base setup, then asserts that
// a device handle exists for this parameterization before any test body runs.
void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
}
390
// Returns the named test models registered with the TestModelManager that
// satisfy |filter|.
std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}
394
395std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
396 const auto& [namedDevice, namedModel] = info.param;
397 return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
398}
399
// Tag for the generated tests (fully specified output shapes).
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests (output dimensions unspecified).
class DynamicOutputShapeTest : public GeneratedTest {};
405
// Runs the generated model with fully specified output dimensions.
TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
}
409
// Runs the generated model with output dimensions stripped, exercising the
// driver's dynamic output shape support.
TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
}
413
// Instantiate both suites for every registered model that is expected to
// succeed; models flagged expectFailure are filtered out.
INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });
419
Lev Proleev26d1bc82019-08-30 11:57:18 +0100420} // namespace android::hardware::neuralnetworks::V1_3::vts::functional