/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <numeric>

#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_2::vts::functional {

using namespace test_helper;
using hidl::memory::V1_0::IMemory;
using implementation::ExecutionCallback;
using implementation::PreparedModelCallback;
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_0::Request;
using V1_1::ExecutionPreference;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };

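// Converts a TestModel into a HIDL Model. Operand data with CONSTANT_COPY lifetime is packed
// into the model's operandValues vector; data with CONSTANT_REFERENCE lifetime is written into
// a single shared memory pool.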
Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        Operand::ExtraParams extraParams;
        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            extraParams.channelQuant(SymmPerChannelQuantParams{
                    .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc,
                       .extraParams = std::move(extraParams)};
    }

    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory.
    hidl_vec<hidl_memory> pools = {};
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}

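// Returns true if the output operand at the given index is larger than one byte, i.e. its buffer
// can be shrunk to exercise the insufficient-size path.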
static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
    const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
    return byteSize > 1u;
}

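// Shrinks the length of the given output buffer by one byte so that an execution is expected to
// report OUTPUT_INSUFFICIENT_SIZE.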
static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
    auto& length = request->outputs[outputIndex].location.length;
    ASSERT_GT(length, 1u);
    length -= 1u;
}

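// Zeroes out all output dimensions so the model's outputs have unspecified (dynamic) shapes.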
static void makeOutputDimensionsUnspecified(Model* model) {
    for (auto i : model->outputIndexes) {
        auto& dims = model->operands[i].dimensions;
        std::fill(dims.begin(), dims.end(), 0);
    }
}

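// Asynchronous execution: launches execute_1_2 and reports completion through the callback.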
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_2(request, measure, callback);
}

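// Synchronous execution: calls executeSynchronously and captures the output shapes and timing.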
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                const Request& request, MeasureTiming measure,
                                                hidl_vec<OutputShape>* outputShapes,
                                                Timing* timing) {
    ErrorStatus result;
    Return<void> ret = preparedModel->executeSynchronously(
            request, measure,
            [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                            const Timing& time) {
                result = error;
                *outputShapes = shapes;
                *timing = time;
            });
    if (!ret.isOk()) {
        return ErrorStatus::GENERAL_FAILURE;
    }
    return result;
}

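// Creates an ExecutionBurstController for the prepared model in blocking mode.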
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<IPreparedModel>& preparedModel) {
    return android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
}

enum class Executor { ASYNC, SYNC, BURST };

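// Runs a single execution through the requested executor path (asynchronous, synchronous, or
// burst) and checks the returned status, timing, output shapes, and output data against the
// reference results in the test model.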
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           Executor executor, MeasureTiming measure, OutputType outputType) {
    // If output0 is not larger than one byte, we cannot test with an insufficient output buffer.
    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    Request request = createRequest(testModel);
    if (outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus =
                    ExecutePreparedModel(preparedModel, request, measure, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

            // retrieve execution status
            executionCallback->wait();
            executionStatus = executionCallback->getStatus();
            outputShapes = executionCallback->getOutputShapes();
            timing = executionCallback->getTiming();

            break;
        }
        case Executor::SYNC: {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus =
                    ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

            break;
        }
        case Executor::BURST: {
            SCOPED_TRACE("burst");

            // create burst
            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
                    CreateBurst(preparedModel);
            ASSERT_NE(nullptr, controller.get());

            // create memory keys
            std::vector<intptr_t> keys(request.pools.size());
            for (size_t i = 0; i < keys.size(); ++i) {
                keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
            }

            // execute burst
            std::tie(executionStatus, outputShapes, timing) =
                    controller->compute(request, measure, keys);

            break;
        }
    }

    if (outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "execute model that it does not support."
                  << std::endl;
        GTEST_SKIP();
    }
    if (measure == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
        }
    }

    switch (outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be
            // either empty, or have the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_TRUE(outputShapes.size() == 0 ||
                        outputShapes.size() == testModel.outputIndexes.size());
            break;
        case OutputType::UNSPECIFIED:
            // If the model output operands are not fully specified, outputShapes must have
            // the same number of elements as the number of outputs.
            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            break;
        case OutputType::INSUFFICIENT:
            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
            ASSERT_FALSE(outputShapes[0].isSufficient);
            return;
    }

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(request);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}

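// Exercises every combination of executor and timing measurement. With dynamic output shapes,
// both the unspecified-shape and insufficient-buffer cases are covered; otherwise the outputs
// are fully specified.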
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           bool testDynamicOutputShape) {
    if (testDynamicOutputShape) {
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
                              OutputType::INSUFFICIENT);
    } else {
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
                              OutputType::FULLY_SPECIFIED);
    }
}

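// Entry point for a generated test case: builds the HIDL model, optionally clears the output
// dimensions, prepares the model on the device, and evaluates the prepared model.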
void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
    Model model = createModel(testModel);
    if (testDynamicOutputShape) {
        makeOutputDimensionsUnspecified(&model);
    }

    sp<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel);
    if (preparedModel == nullptr) return;

    EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
}

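// The device under test must have been resolved before any generated test runs.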
void GeneratedTestBase::SetUp() {
    testing::TestWithParam<GeneratedTestParam>::SetUp();
    ASSERT_NE(kDevice, nullptr);
}

std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
    return TestModelManager::get().getTestModels(filter);
}

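// Builds a gtest-compliant parameterized test name from the device name and the model name.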
std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
    const auto& [namedDevice, namedModel] = info.param;
    return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
}

// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};

TEST_P(GeneratedTest, Test) {
    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
}

TEST_P(DynamicOutputShapeTest, Test) {
    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional