/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <android/sync.h>
#include <gtest/gtest.h>
#include <hidlmemory/mapping.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <vector>

#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_3::vts::functional {

using namespace test_helper;
using hidl::memory::V1_0::IMemory;
using implementation::ExecutionCallback;
using implementation::PreparedModelCallback;
using V1_0::DataLocation;
using V1_0::RequestArgument;
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

namespace {

enum class Executor { ASYNC, SYNC, BURST, FENCED };

enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };

enum class MemoryType { SHARED, DEVICE };

enum class IOType { INPUT, OUTPUT };

struct TestConfig {
    Executor executor;
    MeasureTiming measureTiming;
    OutputType outputType;
    MemoryType memoryType;
    // `reportSkipping` indicates if a test should print an info message in case
    // it is skipped. The field is set to true by default and is set to false in
    // quantization coupling tests to suppress skipping a test.
    bool reportSkipping;
    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
               MemoryType memoryType)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reportSkipping(true) {}
    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
               MemoryType memoryType, bool reportSkipping)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reportSkipping(reportSkipping) {}
};

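// Helper that allocates driver-managed device memory (IBuffer) for a single input or output
// operand of the test model via IDevice::allocate. Input buffers are also initialized with the
// corresponding test data.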
class DeviceMemoryAllocator {
  public:
    DeviceMemoryAllocator(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                          const TestModel& testModel)
        : kDevice(device), kPreparedModel(preparedModel), kTestModel(testModel) {}

    // Allocate device memory for a target input/output operand.
    // Return {IBuffer object, token} if successful.
    // Return {nullptr, 0} if device memory is not supported.
    template <IOType ioType>
    std::pair<sp<IBuffer>, int32_t> allocate(uint32_t index) {
        std::pair<sp<IBuffer>, int32_t> buffer;
        allocateInternal<ioType>(index, &buffer);
        return buffer;
    }

  private:
    template <IOType ioType>
    void allocateInternal(uint32_t index, std::pair<sp<IBuffer>, int32_t>* result) {
        ASSERT_NE(result, nullptr);

        // Prepare arguments.
        BufferRole role = {.modelIndex = 0, .ioIndex = index, .frequency = 1.0f};
        hidl_vec<BufferRole> inputRoles, outputRoles;
        if constexpr (ioType == IOType::INPUT) {
            inputRoles = {role};
        } else {
            outputRoles = {role};
        }

        // Allocate device memory.
        ErrorStatus status;
        sp<IBuffer> buffer;
        int32_t token;
        const auto ret = kDevice->allocate(
                {}, {kPreparedModel}, inputRoles, outputRoles,
                [&status, &buffer, &token](ErrorStatus error, const sp<IBuffer>& buf, int32_t tok) {
                    status = error;
                    buffer = buf;
                    token = tok;
                });

        // Check allocation results.
        ASSERT_TRUE(ret.isOk());
        if (status == ErrorStatus::NONE) {
            ASSERT_NE(buffer, nullptr);
            ASSERT_GT(token, 0);
        } else {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
            ASSERT_EQ(buffer, nullptr);
            ASSERT_EQ(token, 0);
        }

        // Initialize input data from TestBuffer.
        if constexpr (ioType == IOType::INPUT) {
            if (buffer != nullptr) {
                // TestBuffer -> Shared memory.
                const auto& testBuffer = kTestModel.operands[kTestModel.inputIndexes[index]].data;
                ASSERT_GT(testBuffer.size(), 0);
                hidl_memory tmp = nn::allocateSharedMemory(testBuffer.size());
                sp<IMemory> inputMemory = mapMemory(tmp);
                ASSERT_NE(inputMemory.get(), nullptr);
                uint8_t* inputPtr =
                        static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
                ASSERT_NE(inputPtr, nullptr);
                const uint8_t* begin = testBuffer.get<uint8_t>();
                const uint8_t* end = begin + testBuffer.size();
                std::copy(begin, end, inputPtr);

                // Shared memory -> IBuffer.
                auto ret = buffer->copyFrom(tmp, {});
                ASSERT_TRUE(ret.isOk());
                ASSERT_EQ(static_cast<ErrorStatus>(ret), ErrorStatus::NONE);
            }
        }
        *result = {std::move(buffer), token};
    }

    const sp<IDevice> kDevice;
    const sp<IPreparedModel> kPreparedModel;
    const TestModel& kTestModel;
};

}  // namespace

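// Converts a test_helper::TestModel into a HIDL V1_3 Model. CONSTANT_COPY operand data is packed
// into Model::operandValues; CONSTANT_REFERENCE data is staged in a single shared memory pool.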
Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        Operand::ExtraParams extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            extraParams.channelQuant(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory.
    hidl_vec<hidl_memory> pools = {};
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.main = {.operands = std::move(operands),
                     .operations = std::move(operations),
                     .inputIndexes = testModel.inputIndexes,
                     .outputIndexes = testModel.outputIndexes},
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}

static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
    const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
    return byteSize > 1u;
}

static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
    auto& length = request->outputs[outputIndex].location.length;
    ASSERT_GT(length, 1u);
    length -= 1u;
}

static void makeOutputDimensionsUnspecified(Model* model) {
    for (auto i : model->main.outputIndexes) {
        auto& dims = model->main.operands[i].dimensions;
        std::fill(dims.begin(), dims.end(), 0);
    }
}

constexpr uint32_t kInputPoolIndex = 0;
constexpr uint32_t kOutputPoolIndex = 1;
constexpr uint32_t kDeviceMemoryBeginIndex = 2;

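// Builds a Request for the given test model. Inputs and outputs are backed by shared memory
// pools by default; when preferDeviceMemory is true, device memory (IBuffer) is attempted first
// for each operand and shared memory is used as the fallback. Returns the Request together with
// the IBuffer objects backing the device memory pools.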
static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
        const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
        const TestModel& testModel, bool preferDeviceMemory) {
    // Memory pools are organized as:
    // - 0: Input shared memory pool
    // - 1: Output shared memory pool
    // - [2, 2+i): Input device memories
    // - [2+i, 2+i+o): Output device memories
    DeviceMemoryAllocator allocator(device, preparedModel, testModel);
    std::vector<sp<IBuffer>> buffers;
    std::vector<int32_t> tokens;

    // Model inputs.
    hidl_vec<RequestArgument> inputs(testModel.inputIndexes.size());
    size_t inputSize = 0;
    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
        const auto& op = testModel.operands[testModel.inputIndexes[i]];
        if (op.data.size() == 0) {
            // Omitted input.
            inputs[i] = {.hasNoValue = true};
            continue;
        } else if (preferDeviceMemory) {
            SCOPED_TRACE("Input index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
                                                                       kDeviceMemoryBeginIndex)};
                buffers.push_back(std::move(buffer));
                tokens.push_back(token);
                inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // Reserve shared memory for input.
        DataLocation loc = {.poolIndex = kInputPoolIndex,
                            .offset = static_cast<uint32_t>(inputSize),
                            .length = static_cast<uint32_t>(op.data.size())};
        inputSize += op.data.alignedSize();
        inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Model outputs.
    hidl_vec<RequestArgument> outputs(testModel.outputIndexes.size());
    size_t outputSize = 0;
    for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
        const auto& op = testModel.operands[testModel.outputIndexes[i]];
        if (preferDeviceMemory) {
            SCOPED_TRACE("Output index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
                                                                       kDeviceMemoryBeginIndex)};
                buffers.push_back(std::move(buffer));
                tokens.push_back(token);
                outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the driver, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        size_t bufferSize = std::max<size_t>(op.data.size(), 1);

        // Reserve shared memory for output.
        DataLocation loc = {.poolIndex = kOutputPoolIndex,
                            .offset = static_cast<uint32_t>(outputSize),
                            .length = static_cast<uint32_t>(bufferSize)};
        outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
        outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Memory pools.
    hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + buffers.size());
    pools[kInputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(inputSize, 1)));
    pools[kOutputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(outputSize, 1)));
    CHECK_NE(pools[kInputPoolIndex].hidlMemory().size(), 0u);
    CHECK_NE(pools[kOutputPoolIndex].hidlMemory().size(), 0u);
    for (uint32_t i = 0; i < buffers.size(); i++) {
        pools[kDeviceMemoryBeginIndex + i].token(tokens[i]);
    }

    // Copy input data to the input shared memory pool.
    sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex].hidlMemory());
    CHECK(inputMemory.get() != nullptr);
    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
    CHECK(inputPtr != nullptr);
    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
        if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
            const auto& op = testModel.operands[testModel.inputIndexes[i]];
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, inputPtr + inputs[i].location.offset);
        }
    }

    Request request = {
            .inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
    return {std::move(request), std::move(buffers)};
}

// Get a TestBuffer with data copied from an IBuffer object.
static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) {
    // IBuffer -> Shared memory.
    hidl_memory tmp = nn::allocateSharedMemory(size);
    const auto ret = buffer->copyTo(tmp);
    ASSERT_TRUE(ret.isOk());
    ASSERT_EQ(static_cast<ErrorStatus>(ret), ErrorStatus::NONE);

    // Shared memory -> TestBuffer.
    sp<IMemory> outputMemory = mapMemory(tmp);
    ASSERT_NE(outputMemory.get(), nullptr);
    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
    ASSERT_NE(outputPtr, nullptr);
    ASSERT_NE(testBuffer, nullptr);
    *testBuffer = TestBuffer(size, outputPtr);
}

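// Collects the results of an execution into TestBuffers, reading from the output shared memory
// pool or copying out of device memory (IBuffer) as appropriate for each output operand.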
static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, const Request& request,
                                                const std::vector<sp<IBuffer>>& buffers) {
    sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex].hidlMemory());
    CHECK(outputMemory.get() != nullptr);
    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
    CHECK(outputPtr != nullptr);

    // Copy out output results.
    std::vector<TestBuffer> outputBuffers;
    for (uint32_t i = 0; i < request.outputs.size(); i++) {
        const auto& outputLoc = request.outputs[i].location;
        if (outputLoc.poolIndex == kOutputPoolIndex) {
            outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
        } else {
            const auto& op = testModel.operands[testModel.outputIndexes[i]];
            if (op.data.size() == 0) {
                outputBuffers.emplace_back();
            } else {
                SCOPED_TRACE("Output index = " + std::to_string(i));
                const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
                TestBuffer buffer;
                getBuffer(buffers[bufferIndex], op.data.size(), &buffer);
                outputBuffers.push_back(std::move(buffer));
            }
        }
    }
    return outputBuffers;
}

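// Thin wrappers over the asynchronous (execute_1_3) and synchronous (executeSynchronously_1_3)
// execution paths of IPreparedModel.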
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_3(request, measure, {}, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                hidl_vec<OutputShape>* outputShapes,
                                                Timing* timing) {
    ErrorStatus result;
    Return<void> ret = preparedModel->executeSynchronously_1_3(
            request, measure, {},
            [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                            const Timing& time) {
                result = error;
                *outputShapes = shapes;
                *timing = time;
            });
    if (!ret.isOk()) {
        return ErrorStatus::GENERAL_FAILURE;
    }
    return result;
}
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<IPreparedModel>& preparedModel) {
    return android::nn::ExecutionBurstController::create(preparedModel,
                                                         std::chrono::microseconds{0});
}

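// Runs one execution of preparedModel against testModel using the given TestConfig and validates
// the execution status, output shapes, timing, and result values. If `skipped` is non-null, it is
// set to true when the driver reports GENERAL_FAILURE for a request whose outputs are not fully
// specified, i.e. when the test is to be skipped.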
void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                           const TestModel& testModel, const TestConfig& testConfig,
                           bool* skipped = nullptr) {
    if (skipped != nullptr) {
        *skipped = false;
    }
    // If output0 is not larger than one byte, we cannot test with an insufficient output buffer.
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    auto [request, buffers] =
            createRequest(device, preparedModel, testModel,
                          /*preferDeviceMemory=*/testConfig.memoryType == MemoryType::DEVICE);
    // Skip if testing memory domain but no device memory has been allocated.
    if (testConfig.memoryType == MemoryType::DEVICE && buffers.empty()) {
        return;
    }
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (testConfig.executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

            // retrieve execution status
            executionCallback->wait();
            executionStatus = executionCallback->getStatus();
            outputShapes = executionCallback->getOutputShapes();
            timing = executionCallback->getTiming();

            break;
        }
        case Executor::SYNC: {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

            break;
        }
        case Executor::BURST: {
            // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains
            // V1_2.
            SCOPED_TRACE("burst");

            // check compliance
            ASSERT_TRUE(nn::compliantWithV1_0(request));
            V1_0::Request request10 = nn::convertToV1_0(request);

            // create burst
            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
                    CreateBurst(preparedModel);
            ASSERT_NE(nullptr, controller.get());

            // create memory keys
            std::vector<intptr_t> keys(request10.pools.size());
            for (size_t i = 0; i < keys.size(); ++i) {
                keys[i] = reinterpret_cast<intptr_t>(&request10.pools[i]);
            }

            // execute burst
            int n;
            std::tie(n, outputShapes, timing, std::ignore) =
                    controller->compute(request10, testConfig.measureTiming, keys);
            executionStatus = nn::convertResultCodeToErrorStatus(n);

            break;
        }
        case Executor::FENCED: {
            SCOPED_TRACE("fenced");
            ErrorStatus result;
            hidl_handle sync_fence_handle;
            sp<IFencedExecutionCallback> fenced_callback;
            Return<void> ret = preparedModel->executeFenced(
                    request, {}, testConfig.measureTiming,
                    [&result, &sync_fence_handle, &fenced_callback](
                            ErrorStatus error, const hidl_handle& handle,
                            const sp<IFencedExecutionCallback>& callback) {
                        result = error;
                        sync_fence_handle = handle;
                        fenced_callback = callback;
                    });
            ASSERT_TRUE(ret.isOk());
            if (result != ErrorStatus::NONE) {
                ASSERT_EQ(sync_fence_handle.getNativeHandle(), nullptr);
                ASSERT_EQ(fenced_callback, nullptr);
                executionStatus = ErrorStatus::GENERAL_FAILURE;
            } else if (sync_fence_handle.getNativeHandle()) {
                constexpr int kInfiniteTimeout = -1;
                int sync_fd = sync_fence_handle.getNativeHandle()->data[0];
                ASSERT_GT(sync_fd, 0);
                int r = sync_wait(sync_fd, kInfiniteTimeout);
                ASSERT_GE(r, 0);
            }
            if (result == ErrorStatus::NONE) {
                ASSERT_NE(fenced_callback, nullptr);
                Return<void> ret = fenced_callback->getExecutionInfo(
                        [&executionStatus, &timing](ErrorStatus error, Timing t) {
                            executionStatus = error;
                            timing = t;
                        });
                ASSERT_TRUE(ret.isOk());
            }
            break;
        }
    }

    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        if (skipped != nullptr) {
            *skipped = true;
        }
        if (!testConfig.reportSkipping) {
            return;
        }
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
        std::cout << "[          ] Early termination of test because vendor service cannot "
                     "execute model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }
    if (testConfig.measureTiming == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }

    switch (testConfig.outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be
            // either empty, or have the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_TRUE(outputShapes.size() == 0 ||
                        outputShapes.size() == testModel.outputIndexes.size());
            break;
        case OutputType::UNSPECIFIED:
            // If the model output operands are not fully specified, outputShapes must have
            // the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            break;
        case OutputType::INSUFFICIENT:
            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            ASSERT_FALSE(outputShapes[0].isSufficient);
            return;
    }

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(testModel, request, buffers);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}

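// Expands a TestKind into the set of (executor, measure timing, output type, memory type)
// configurations to exercise, and evaluates the prepared model under each of them.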
void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                           const TestModel& testModel, TestKind testKind) {
    std::vector<OutputType> outputTypesList;
    std::vector<MeasureTiming> measureTimingList;
    std::vector<Executor> executorList;
    MemoryType memoryType = MemoryType::SHARED;

    switch (testKind) {
        case TestKind::GENERAL: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
        } break;
        case TestKind::DYNAMIC_SHAPE: {
            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
        } break;
        case TestKind::MEMORY_DOMAIN: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {MeasureTiming::NO};
            executorList = {Executor::ASYNC, Executor::SYNC};
            memoryType = MemoryType::DEVICE;
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
            return;
        } break;
    }

    for (const OutputType outputType : outputTypesList) {
        for (const MeasureTiming measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
            }
        }
    }
}

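// Evaluates a model and its signed-quantized counterpart under identical configurations and
// asserts that the driver either executes both or skips both.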
void EvaluatePreparedCoupledModels(const sp<IDevice>& device,
                                   const sp<IPreparedModel>& preparedModel,
                                   const TestModel& testModel,
                                   const sp<IPreparedModel>& preparedCoupledModel,
                                   const TestModel& coupledModel) {
    const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
    const std::vector<MeasureTiming> measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
    const std::vector<Executor> executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST,
                                                Executor::FENCED};

    for (const OutputType outputType : outputTypesList) {
        for (const MeasureTiming measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType,
                                            MemoryType::SHARED,
                                            /*reportSkipping=*/false);
                bool baseSkipped = false;
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
                bool coupledSkipped = false;
                EvaluatePreparedModel(device, preparedCoupledModel, coupledModel, testConfig,
                                      &coupledSkipped);
                ASSERT_EQ(baseSkipped, coupledSkipped);
                if (baseSkipped) {
                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                                 "execute model that it does not support.";
                    std::cout << "[          ] Early termination of test because vendor service "
                                 "cannot execute model that it does not support."
                              << std::endl;
                    GTEST_SKIP();
                }
            }
        }
    }
}

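// Entry point for the generated tests: builds the HIDL model from the test description, prepares
// it on the device, and dispatches to the evaluation path matching testKind.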
void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
    Model model = createModel(testModel);
    if (testKind == TestKind::DYNAMIC_SHAPE) {
        makeOutputDimensionsUnspecified(&model);
    }

    sp<IPreparedModel> preparedModel;
    switch (testKind) {
        case TestKind::GENERAL:
        case TestKind::DYNAMIC_SHAPE:
        case TestKind::MEMORY_DOMAIN: {
            createPreparedModel(device, model, &preparedModel);
            if (preparedModel == nullptr) return;
            EvaluatePreparedModel(device, preparedModel, testModel, testKind);
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
            createPreparedModel(device, model, &preparedModel,
                                /*reportSkipping*/ false);
            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
            sp<IPreparedModel> preparedCoupledModel;
            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
                                /*reportSkipping*/ false);
            // If we couldn't prepare a model with unsigned quantization, we must
            // fail to prepare a model with signed quantization as well.
            if (preparedModel == nullptr) {
                ASSERT_EQ(preparedCoupledModel, nullptr);
                // If we failed to prepare both of the models, we can safely skip
                // the test.
                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                             "prepare model that it does not support.";
                std::cout
                        << "[          ] Early termination of test because vendor service cannot "
                           "prepare model that it does not support."
                        << std::endl;
                GTEST_SKIP();
            }
            ASSERT_NE(preparedCoupledModel, nullptr);
            EvaluatePreparedCoupledModels(device, preparedModel, testModel, preparedCoupledModel,
                                          signedQuantizedModel);
        } break;
    }
}

void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);

    const Return<void> ret =
            kDevice->supportsDeadlines([this](bool prepareModelDeadline, bool executionDeadline) {
                mSupportsDeadlines = {prepareModelDeadline, executionDeadline};
            });
    ASSERT_TRUE(ret.isOk());
}

std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
    const auto& [namedDevice, namedModel] = info.param;
    return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
}

// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};

// Tag for the memory domain tests
class MemoryDomainTest : public GeneratedTest {};

// Tag for the quantization coupling tests
class QuantizationCouplingTest : public GeneratedTest {};

TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
}

TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
}

TEST_P(MemoryDomainTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::MEMORY_DOMAIN);
}

TEST_P(QuantizationCouplingTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return testModel.hasQuant8CoupledOperands() && testModel.operations.size() == 1;
});

}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional