/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <gtest/gtest.h>
#include <hidlmemory/mapping.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <vector>

#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "1.3/Callbacks.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_3::vts::functional {

using namespace test_helper;
using hidl::memory::V1_0::IMemory;
using implementation::PreparedModelCallback;
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::RequestArgument;
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
using V1_2::implementation::ExecutionCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

namespace {

enum class Executor { ASYNC, SYNC, BURST };

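// OutputType controls how model outputs are exercised:
// - FULLY_SPECIFIED: output operands keep their fully specified dimensions.
// - UNSPECIFIED: output dimensions are zeroed out, so the driver must deduce and report them.
// - INSUFFICIENT: output0's buffer is deliberately shrunk by one byte to exercise the
//   OUTPUT_INSUFFICIENT_SIZE error path.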
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };

enum class MemoryType { SHARED, DEVICE };

enum class IOType { INPUT, OUTPUT };

struct TestConfig {
    Executor executor;
    MeasureTiming measureTiming;
    OutputType outputType;
    MemoryType memoryType;
    // `reportSkipping` indicates whether a test should print an info message when it is
    // skipped. It is true by default and is set to false in quantization coupling tests to
    // suppress reporting the skip.
    bool reportSkipping;
    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
               MemoryType memoryType)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reportSkipping(true) {}
    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
               MemoryType memoryType, bool reportSkipping)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reportSkipping(reportSkipping) {}
};

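// Wraps IDevice::allocate() to obtain driver-managed (device) memory for a single input or
// output operand of the test model, and pre-fills input buffers with the TestBuffer data.
// A minimal usage sketch, mirroring how createRequest() below uses it (the variable names are
// illustrative only):
//
//     DeviceMemoryAllocator allocator(device, preparedModel, testModel);
//     auto [buffer, token] = allocator.allocate<IOType::INPUT>(/*index=*/0);
//     if (buffer != nullptr) {
//         // Reference the device memory from a Request::MemoryPool via `token`.
//     } else {
//         // The driver does not support device memory; fall back to shared memory.
//     }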
class DeviceMemoryAllocator {
  public:
    DeviceMemoryAllocator(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                          const TestModel& testModel)
        : kDevice(device), kPreparedModel(preparedModel), kTestModel(testModel) {}

    // Allocate device memory for a target input/output operand.
    // Return {IBuffer object, token} if successful.
    // Return {nullptr, 0} if device memory is not supported.
    template <IOType ioType>
    std::pair<sp<IBuffer>, int32_t> allocate(uint32_t index) {
        std::pair<sp<IBuffer>, int32_t> buffer;
        allocateInternal<ioType>(index, &buffer);
        return buffer;
    }

  private:
    template <IOType ioType>
    void allocateInternal(uint32_t index, std::pair<sp<IBuffer>, int32_t>* result) {
        ASSERT_NE(result, nullptr);

        // Prepare arguments.
        BufferRole role = {.modelIndex = 0, .ioIndex = index, .frequency = 1.0f};
        hidl_vec<BufferRole> inputRoles, outputRoles;
        if constexpr (ioType == IOType::INPUT) {
            inputRoles = {role};
        } else {
            outputRoles = {role};
        }

        // Allocate device memory.
        ErrorStatus status;
        sp<IBuffer> buffer;
        int32_t token;
        const auto ret = kDevice->allocate(
                {}, {kPreparedModel}, inputRoles, outputRoles,
                [&status, &buffer, &token](ErrorStatus error, const sp<IBuffer>& buf, int32_t tok) {
                    status = error;
                    buffer = buf;
                    token = tok;
                });

        // Check allocation results.
        ASSERT_TRUE(ret.isOk());
        if (status == ErrorStatus::NONE) {
            ASSERT_NE(buffer, nullptr);
            ASSERT_GT(token, 0);
        } else {
            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
            ASSERT_EQ(buffer, nullptr);
            ASSERT_EQ(token, 0);
        }

        // Initialize input data from TestBuffer.
        if constexpr (ioType == IOType::INPUT) {
            if (buffer != nullptr) {
                // TestBuffer -> Shared memory.
                const auto& testBuffer = kTestModel.operands[kTestModel.inputIndexes[index]].data;
                ASSERT_GT(testBuffer.size(), 0);
                hidl_memory tmp = nn::allocateSharedMemory(testBuffer.size());
                sp<IMemory> inputMemory = mapMemory(tmp);
                ASSERT_NE(inputMemory.get(), nullptr);
                uint8_t* inputPtr =
                        static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
                ASSERT_NE(inputPtr, nullptr);
                const uint8_t* begin = testBuffer.get<uint8_t>();
                const uint8_t* end = begin + testBuffer.size();
                std::copy(begin, end, inputPtr);

                // Shared memory -> IBuffer.
                auto ret = buffer->copyFrom(tmp, {});
                ASSERT_TRUE(ret.isOk());
                ASSERT_EQ(static_cast<ErrorStatus>(ret), ErrorStatus::NONE);
            }
        }
        *result = {std::move(buffer), token};
    }

    const sp<IDevice> kDevice;
    const sp<IPreparedModel> kPreparedModel;
    const TestModel& kTestModel;
};

}  // namespace

Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        Operand::ExtraParams extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            extraParams.channelQuant(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory.
    hidl_vec<hidl_memory> pools = {};
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.main = {.operands = std::move(operands),
                     .operations = std::move(operations),
                     .inputIndexes = testModel.inputIndexes,
                     .outputIndexes = testModel.outputIndexes},
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}

static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
    const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
    return byteSize > 1u;
}

static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
    auto& length = request->outputs[outputIndex].location.length;
    ASSERT_GT(length, 1u);
    length -= 1u;
}

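// Zero out the output dimensions of the model. In the NNAPI HAL, a dimension of 0 means
// "unspecified", so the driver is expected to deduce the actual output shapes at execution time
// and report them via OutputShape.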
static void makeOutputDimensionsUnspecified(Model* model) {
    for (auto i : model->main.outputIndexes) {
        auto& dims = model->main.operands[i].dimensions;
        std::fill(dims.begin(), dims.end(), 0);
    }
}

constexpr uint32_t kInputPoolIndex = 0;
constexpr uint32_t kOutputPoolIndex = 1;
constexpr uint32_t kDeviceMemoryBeginIndex = 2;

static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
        const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
        const TestModel& testModel, bool preferDeviceMemory) {
    // Memory pools are organized as:
    // - 0: Input shared memory pool
    // - 1: Output shared memory pool
    // - [2, 2+i): Input device memories
    // - [2+i, 2+i+o): Output device memories
    DeviceMemoryAllocator allocator(device, preparedModel, testModel);
    std::vector<sp<IBuffer>> buffers;
    std::vector<int32_t> tokens;

    // Model inputs.
    hidl_vec<RequestArgument> inputs(testModel.inputIndexes.size());
    size_t inputSize = 0;
    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
        const auto& op = testModel.operands[testModel.inputIndexes[i]];
        if (op.data.size() == 0) {
            // Omitted input.
            inputs[i] = {.hasNoValue = true};
            continue;
        } else if (preferDeviceMemory) {
            SCOPED_TRACE("Input index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
                                                                       kDeviceMemoryBeginIndex)};
                buffers.push_back(std::move(buffer));
                tokens.push_back(token);
                inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // Reserve shared memory for input.
        DataLocation loc = {.poolIndex = kInputPoolIndex,
                            .offset = static_cast<uint32_t>(inputSize),
                            .length = static_cast<uint32_t>(op.data.size())};
        inputSize += op.data.alignedSize();
        inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Model outputs.
    hidl_vec<RequestArgument> outputs(testModel.outputIndexes.size());
    size_t outputSize = 0;
    for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
        const auto& op = testModel.operands[testModel.outputIndexes[i]];
        if (preferDeviceMemory) {
            SCOPED_TRACE("Output index = " + std::to_string(i));
            auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
            if (buffer != nullptr) {
                DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
                                                                       kDeviceMemoryBeginIndex)};
                buffers.push_back(std::move(buffer));
                tokens.push_back(token);
                outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                continue;
            }
        }

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the driver, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        size_t bufferSize = std::max<size_t>(op.data.size(), 1);

        // Reserve shared memory for output.
        DataLocation loc = {.poolIndex = kOutputPoolIndex,
                            .offset = static_cast<uint32_t>(outputSize),
                            .length = static_cast<uint32_t>(bufferSize)};
        outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
        outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Memory pools.
    hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + buffers.size());
    pools[kInputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(inputSize, 1)));
    pools[kOutputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(outputSize, 1)));
    CHECK_NE(pools[kInputPoolIndex].hidlMemory().size(), 0u);
    CHECK_NE(pools[kOutputPoolIndex].hidlMemory().size(), 0u);
    for (uint32_t i = 0; i < buffers.size(); i++) {
        pools[kDeviceMemoryBeginIndex + i].token(tokens[i]);
    }

    // Copy input data to the input shared memory pool.
    sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex].hidlMemory());
    CHECK(inputMemory.get() != nullptr);
    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
    CHECK(inputPtr != nullptr);
    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
        if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
            const auto& op = testModel.operands[testModel.inputIndexes[i]];
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, inputPtr + inputs[i].location.offset);
        }
    }

    Request request = {
            .inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
    return {std::move(request), std::move(buffers)};
}

// Get a TestBuffer with data copied from an IBuffer object.
static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) {
    // IBuffer -> Shared memory.
    hidl_memory tmp = nn::allocateSharedMemory(size);
    const auto ret = buffer->copyTo(tmp);
    ASSERT_TRUE(ret.isOk());
    ASSERT_EQ(static_cast<ErrorStatus>(ret), ErrorStatus::NONE);

    // Shared memory -> TestBuffer.
    sp<IMemory> outputMemory = mapMemory(tmp);
    ASSERT_NE(outputMemory.get(), nullptr);
    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
    ASSERT_NE(outputPtr, nullptr);
    ASSERT_NE(testBuffer, nullptr);
    *testBuffer = TestBuffer(size, outputPtr);
}

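// Collect the output results of an execution: outputs placed in the shared output pool are read
// directly, while outputs backed by device memory are copied out through getBuffer() above.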
static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, const Request& request,
                                                const std::vector<sp<IBuffer>>& buffers) {
    sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex].hidlMemory());
    CHECK(outputMemory.get() != nullptr);
    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
    CHECK(outputPtr != nullptr);

    // Copy out output results.
    std::vector<TestBuffer> outputBuffers;
    for (uint32_t i = 0; i < request.outputs.size(); i++) {
        const auto& outputLoc = request.outputs[i].location;
        if (outputLoc.poolIndex == kOutputPoolIndex) {
            outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
        } else {
            const auto& op = testModel.operands[testModel.outputIndexes[i]];
            if (op.data.size() == 0) {
                outputBuffers.emplace_back();
            } else {
                SCOPED_TRACE("Output index = " + std::to_string(i));
                const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
                TestBuffer buffer;
                getBuffer(buffers[bufferIndex], op.data.size(), &buffer);
                outputBuffers.push_back(std::move(buffer));
            }
        }
    }
    return outputBuffers;
}

static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_3(request, measure, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                hidl_vec<OutputShape>* outputShapes,
                                                Timing* timing) {
    ErrorStatus result;
    Return<void> ret = preparedModel->executeSynchronously_1_3(
            request, measure,
            [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                            const Timing& time) {
                result = error;
                *outputShapes = shapes;
                *timing = time;
            });
    if (!ret.isOk()) {
        return ErrorStatus::GENERAL_FAILURE;
    }
    return result;
}
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<IPreparedModel>& preparedModel) {
    return android::nn::ExecutionBurstController::create(preparedModel,
                                                         std::chrono::microseconds{0});
}

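// Run a single test configuration against a prepared model: build a Request (optionally backed
// by device memory), execute it through the configured executor (async, sync, or burst),
// validate the reported output shapes and timing, and compare the results against the reference
// data in the TestModel.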
void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                           const TestModel& testModel, const TestConfig& testConfig,
                           bool* skipped = nullptr) {
    if (skipped != nullptr) {
        *skipped = false;
    }
    // If output0 is not larger than one byte, we cannot test with an insufficient output buffer.
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    auto [request, buffers] =
            createRequest(device, preparedModel, testModel,
                          /*preferDeviceMemory=*/testConfig.memoryType == MemoryType::DEVICE);
    // Skip if testing memory domain but no device memory has been allocated.
    if (testConfig.memoryType == MemoryType::DEVICE && buffers.empty()) {
        return;
    }
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (testConfig.executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

            // retrieve execution status
            executionCallback->wait();
            executionStatus = executionCallback->getStatus();
            outputShapes = executionCallback->getOutputShapes();
            timing = executionCallback->getTiming();

            break;
        }
        case Executor::SYNC: {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

            break;
        }
        case Executor::BURST: {
            // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains
            // V1_2.
            SCOPED_TRACE("burst");

            // check compliance
            ASSERT_TRUE(nn::compliantWithV1_0(request));
            V1_0::Request request10 = nn::convertToV1_0(request);

            // create burst
            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
                    CreateBurst(preparedModel);
            ASSERT_NE(nullptr, controller.get());

            // create memory keys
            std::vector<intptr_t> keys(request10.pools.size());
            for (size_t i = 0; i < keys.size(); ++i) {
                keys[i] = reinterpret_cast<intptr_t>(&request10.pools[i]);
            }

            // execute burst
            int n;
            std::tie(n, outputShapes, timing, std::ignore) =
                    controller->compute(request10, testConfig.measureTiming, keys);
            executionStatus = nn::convertResultCodeToErrorStatus(n);

            break;
        }
    }

    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        if (skipped != nullptr) {
            *skipped = true;
        }
        if (!testConfig.reportSkipping) {
            return;
        }
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
        std::cout << "[          ] Early termination of test because vendor service cannot "
                     "execute model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }
    if (testConfig.measureTiming == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }

    switch (testConfig.outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be either
            // empty, or have the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_TRUE(outputShapes.size() == 0 ||
                        outputShapes.size() == testModel.outputIndexes.size());
            break;
        case OutputType::UNSPECIFIED:
            // If the model output operands are not fully specified, outputShapes must have
            // the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            break;
        case OutputType::INSUFFICIENT:
            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            ASSERT_FALSE(outputShapes[0].isSufficient);
            return;
    }

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(testModel, request, buffers);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}

void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                           const TestModel& testModel, TestKind testKind) {
    std::vector<OutputType> outputTypesList;
    std::vector<MeasureTiming> measureTimingList;
    std::vector<Executor> executorList;
    MemoryType memoryType = MemoryType::SHARED;

    switch (testKind) {
        case TestKind::GENERAL: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
        } break;
        case TestKind::DYNAMIC_SHAPE: {
            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
        } break;
        case TestKind::MEMORY_DOMAIN: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {MeasureTiming::NO};
            executorList = {Executor::ASYNC, Executor::SYNC};
            memoryType = MemoryType::DEVICE;
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
            return;
        } break;
    }

    for (const OutputType outputType : outputTypesList) {
        for (const MeasureTiming measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
            }
        }
    }
}

void EvaluatePreparedCoupledModels(const sp<IDevice>& device,
                                   const sp<IPreparedModel>& preparedModel,
                                   const TestModel& testModel,
                                   const sp<IPreparedModel>& preparedCoupledModel,
                                   const TestModel& coupledModel) {
    const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
    const std::vector<MeasureTiming> measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
    const std::vector<Executor> executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};

    for (const OutputType outputType : outputTypesList) {
        for (const MeasureTiming measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType,
                                            MemoryType::SHARED,
                                            /*reportSkipping=*/false);
                bool baseSkipped = false;
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
                bool coupledSkipped = false;
                EvaluatePreparedModel(device, preparedCoupledModel, coupledModel, testConfig,
                                      &coupledSkipped);
                ASSERT_EQ(baseSkipped, coupledSkipped);
                if (baseSkipped) {
                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                                 "execute model that it does not support.";
                    std::cout << "[          ] Early termination of test because vendor service "
                                 "cannot execute model that it does not support."
                              << std::endl;
                    GTEST_SKIP();
                }
            }
        }
    }
}

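// Entry point used by the generated tests below: build the HIDL model from the TestModel,
// prepare it on the device, and then evaluate it according to the requested TestKind.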
void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
    Model model = createModel(testModel);
    if (testKind == TestKind::DYNAMIC_SHAPE) {
        makeOutputDimensionsUnspecified(&model);
    }

    sp<IPreparedModel> preparedModel;
    switch (testKind) {
        case TestKind::GENERAL:
        case TestKind::DYNAMIC_SHAPE:
        case TestKind::MEMORY_DOMAIN: {
            createPreparedModel(device, model, &preparedModel);
            if (preparedModel == nullptr) return;
            EvaluatePreparedModel(device, preparedModel, testModel, testKind);
        } break;
        case TestKind::QUANTIZATION_COUPLING: {
            ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
            createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
            sp<IPreparedModel> preparedCoupledModel;
            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
                                /*reportSkipping*/ false);
            // If we couldn't prepare a model with unsigned quantization, we must
            // fail to prepare a model with signed quantization as well.
            if (preparedModel == nullptr) {
                ASSERT_EQ(preparedCoupledModel, nullptr);
                // If we failed to prepare both of the models, we can safely skip
                // the test.
                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                             "prepare model that it does not support.";
                std::cout << "[          ] Early termination of test because vendor service "
                             "cannot prepare model that it does not support."
                          << std::endl;
                GTEST_SKIP();
            }
            ASSERT_NE(preparedCoupledModel, nullptr);
            EvaluatePreparedCoupledModels(device, preparedModel, testModel, preparedCoupledModel,
                                          signedQuantizedModel);
        } break;
    }
}

void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
}

std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
    const auto& [namedDevice, namedModel] = info.param;
    return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
}

// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};

// Tag for the memory domain tests
class MemoryDomainTest : public GeneratedTest {};

// Tag for the quantization coupling tests
class QuantizationCouplingTest : public GeneratedTest {};

TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
}

TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
}

TEST_P(MemoryDomainTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::MEMORY_DOMAIN);
}

TEST_P(QuantizationCouplingTest, Test) {
    Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return testModel.hasQuant8CoupledOperands() && testModel.operations.size() == 1;
});

}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional