blob: 1dcebbe39c120aa69bf555c51893ea4474fef3cf [file] [log] [blame]
Slava Shklyaev73ee79d2019-05-14 14:15:14 +01001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "GeneratedTestHarness.h"
18
19#include <android-base/logging.h>
20#include <android/hardware/neuralnetworks/1.0/IDevice.h>
21#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
22#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
23#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
24#include <android/hardware/neuralnetworks/1.0/types.h>
25#include <android/hardware/neuralnetworks/1.1/IDevice.h>
26#include <android/hardware/neuralnetworks/1.2/IDevice.h>
27#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
28#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
29#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
30#include <android/hidl/allocator/1.0/IAllocator.h>
31#include <android/hidl/memory/1.0/IMemory.h>
32#include <hidlmemory/mapping.h>
33
#include <gtest/gtest.h>

#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010038
39#include "1.0/Utils.h"
40#include "1.2/Callbacks.h"
41#include "ExecutionBurstController.h"
42#include "MemoryUtils.h"
43#include "TestHarness.h"
44#include "Utils.h"
45
46namespace android {
47namespace hardware {
48namespace neuralnetworks {
Slava Shklyaeve8b24462019-07-17 15:50:57 +010049namespace V1_2 {
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010050namespace generated_tests {
51
Xusong Wangead950d2019-08-09 16:45:24 -070052using namespace test_helper;
53using ::android::hardware::neuralnetworks::V1_0::DataLocation;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010054using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
Xusong Wangead950d2019-08-09 16:45:24 -070055using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010056using ::android::hardware::neuralnetworks::V1_0::Request;
57using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
58using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
Michael Butler3835f612019-07-11 15:43:22 -070059using ::android::hardware::neuralnetworks::V1_2::Constant;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010060using ::android::hardware::neuralnetworks::V1_2::IDevice;
61using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
Michael Butler3835f612019-07-11 15:43:22 -070062using ::android::hardware::neuralnetworks::V1_2::MeasureTiming;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010063using ::android::hardware::neuralnetworks::V1_2::Model;
Michael Butler3835f612019-07-11 15:43:22 -070064using ::android::hardware::neuralnetworks::V1_2::OutputShape;
65using ::android::hardware::neuralnetworks::V1_2::Timing;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010066using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
67using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
68using ::android::hidl::memory::V1_0::IMemory;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010069using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
70
Xusong Wangead950d2019-08-09 16:45:24 -070071enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
72
// Converts a test_helper::TestModel into a V1_2 HIDL Model.
// Constant operand data is packed into two pools: CONSTANT_COPY values are
// placed inline in Model::operandValues, while CONSTANT_REFERENCE values are
// placed in a single shared-memory pool (pools[0]). Offsets are assigned in
// a first pass and the bytes are copied in later passes.
Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    // Running byte offsets for the two constant pools; alignedSize() padding
    // keeps each operand's data suitably aligned within its pool.
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        // Assign a location within the appropriate constant pool; operands
        // with other lifetimes (inputs, outputs, temporaries) keep a zeroed
        // DataLocation.
        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        // Per-channel quantization parameters are the only extraParams the
        // test models carry.
        Operand::ExtraParams extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            extraParams.channelQuant(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies: second pass copies CONSTANT_COPY bytes to the offsets
    // assigned above.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory: only allocated when at least one CONSTANT_REFERENCE
    // operand exists.
    hidl_vec<hidl_memory> pools = {};
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}
160
Xusong Wangead950d2019-08-09 16:45:24 -0700161static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
162 const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
163 return byteSize > 1u;
164}
165
// Shrinks the buffer of output |outputIndex| in |request| by one byte so that
// the driver must report OUTPUT_INSUFFICIENT_SIZE.
static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
    auto& location = request->outputs[outputIndex].location;
    ASSERT_GT(location.length, 1u);
    location.length -= 1u;
}
171
172static void makeOutputDimensionsUnspecified(Model* model) {
173 for (auto i : model->outputIndexes) {
174 auto& dims = model->operands[i].dimensions;
175 std::fill(dims.begin(), dims.end(), 0);
176 }
177}
178
// Launches an asynchronous execution of |request| via execute_1_2; status,
// output shapes, and timing are delivered later through |callback|.
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_2(request, measure, callback);
}
Xusong Wangead950d2019-08-09 16:45:24 -0700184static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100185 const Request& request, MeasureTiming measure,
186 hidl_vec<OutputShape>* outputShapes,
187 Timing* timing) {
188 ErrorStatus result;
189 Return<void> ret = preparedModel->executeSynchronously(
190 request, measure,
191 [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
192 const Timing& time) {
193 result = error;
194 *outputShapes = shapes;
195 *timing = time;
196 });
197 if (!ret.isOk()) {
198 return ErrorStatus::GENERAL_FAILURE;
199 }
200 return result;
201}
// Creates a blocking ExecutionBurstController for |preparedModel|; used by
// the Executor::BURST path. May return nullptr on failure.
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<IPreparedModel>& preparedModel) {
    return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
}
// Which IPreparedModel execution path to exercise: asynchronous execute_1_2,
// synchronous executeSynchronously, or the burst controller.
enum class Executor { ASYNC, SYNC, BURST };
Xusong Wangead950d2019-08-09 16:45:24 -0700207
// Runs one execution of |testModel| on |preparedModel| using the given
// execution path (|executor|), timing mode (|measure|), and output sizing
// mode (|outputType|), then validates the reported status, output shapes,
// timing, and the output buffer contents.
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           Executor executor, MeasureTiming measure, OutputType outputType) {
    // If output0 does not have size larger than one byte, we can not test with insufficient buffer.
    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    Request request = createRequest(testModel);
    if (outputType == OutputType::INSUFFICIENT) {
        // Shrink output0's buffer by one byte to force OUTPUT_INSUFFICIENT_SIZE.
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    // All three paths funnel their results into these three locals.
    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus =
                    ExecutePreparedModel(preparedModel, request, measure, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

            // retrieve execution status
            executionCallback->wait();
            executionStatus = executionCallback->getStatus();
            outputShapes = executionCallback->getOutputShapes();
            timing = executionCallback->getTiming();

            break;
        }
        case Executor::SYNC: {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus =
                    ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

            break;
        }
        case Executor::BURST: {
            SCOPED_TRACE("burst");

            // create burst
            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
                    CreateBurst(preparedModel);
            ASSERT_NE(nullptr, controller.get());

            // create memory keys: the burst protocol identifies each memory
            // pool by an opaque key; the pool's address serves as that key.
            std::vector<intptr_t> keys(request.pools.size());
            for (size_t i = 0; i < keys.size(); ++i) {
                keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
            }

            // execute burst
            std::tie(executionStatus, outputShapes, timing) =
                    controller->compute(request, measure, keys);

            break;
        }
    }

    // A driver may legitimately reject a model whose outputs it cannot handle
    // when shapes are not fully specified; treat that as a skip, not a failure.
    if (outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "execute model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }
    // With measurement off, both timing fields must be UINT64_MAX (no value);
    // with measurement on, device time must not exceed driver time when both
    // are reported.
    if (measure == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }

    switch (outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be
            // either empty, or have the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_TRUE(outputShapes.size() == 0 ||
                        outputShapes.size() == testModel.outputIndexes.size());
            break;
        case OutputType::UNSPECIFIED:
            // If the model output operands are not fully specified, outputShapes must have
            // the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            break;
        case OutputType::INSUFFICIENT:
            // Output buffers are invalid in this mode, so return without
            // checking their contents.
            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            ASSERT_FALSE(outputShapes[0].isSufficient);
            return;
    }

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(request);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}
328
Xusong Wangead950d2019-08-09 16:45:24 -0700329void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
330 bool testDynamicOutputShape) {
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100331 if (testDynamicOutputShape) {
Xusong Wangead950d2019-08-09 16:45:24 -0700332 EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
333 OutputType::UNSPECIFIED);
334 EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
335 OutputType::UNSPECIFIED);
336 EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
337 OutputType::UNSPECIFIED);
338 EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
339 OutputType::UNSPECIFIED);
340 EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
341 OutputType::UNSPECIFIED);
342 EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
343 OutputType::UNSPECIFIED);
344 EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
345 OutputType::INSUFFICIENT);
346 EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
347 OutputType::INSUFFICIENT);
348 EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
349 OutputType::INSUFFICIENT);
350 EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
351 OutputType::INSUFFICIENT);
352 EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
353 OutputType::INSUFFICIENT);
354 EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
355 OutputType::INSUFFICIENT);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100356 } else {
Xusong Wangead950d2019-08-09 16:45:24 -0700357 EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
358 OutputType::FULLY_SPECIFIED);
359 EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
360 OutputType::FULLY_SPECIFIED);
361 EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
362 OutputType::FULLY_SPECIFIED);
363 EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
364 OutputType::FULLY_SPECIFIED);
365 EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
366 OutputType::FULLY_SPECIFIED);
367 EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
368 OutputType::FULLY_SPECIFIED);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100369 }
370}
371
// Prepares |model| on |device|. On success, *preparedModel holds the V1_2
// prepared model. If the vendor service cannot prepare a model it does not
// fully support, *preparedModel is left null and the function returns without
// failing, so the caller can skip the test.
void PrepareModel(const sp<IDevice>& device, const Model& model,
                  sp<IPreparedModel>* preparedModel) {
    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedCall = device->getSupportedOperations_1_2(
            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                ASSERT_EQ(ErrorStatus::NONE, status);
                ASSERT_NE(0ul, supported.size());
                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                 [](bool valid) { return valid; });
            });
    ASSERT_TRUE(supportedCall.isOk());

    // launch prepare model; no caching handles or token are supplied.
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model; castFrom yields nullptr (via withDefault) if
    // the service returned an interface older than V1_2.
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
    *preparedModel = IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    // A service claiming full support must prepare successfully.
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
412
// Top-level driver: builds the HIDL model from |testModel|, optionally erases
// the output dimensions for dynamic-shape testing, prepares it on |device|,
// and runs the full set of execution variants against the prepared model.
void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
    Model hidlModel = createModel(testModel);
    if (testDynamicOutputShape) {
        makeOutputDimensionsUnspecified(&hidlModel);
    }

    // A null prepared model means the service legitimately could not prepare
    // this model; skip rather than fail.
    sp<IPreparedModel> preparedModel;
    PrepareModel(device, hidlModel, &preparedModel);
    if (preparedModel == nullptr) {
        GTEST_SKIP();
    }
    EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
}
426
427} // namespace generated_tests
Slava Shklyaeve8b24462019-07-17 15:50:57 +0100428} // namespace V1_2
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100429} // namespace neuralnetworks
430} // namespace hardware
431} // namespace android