blob: 73eeb93a474bfd8f7bee0ed8a4a2972e919407a1 [file] [log] [blame]
Slava Shklyaev73ee79d2019-05-14 14:15:14 +01001/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "GeneratedTestHarness.h"
18
19#include <android-base/logging.h>
20#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
21#include <android/hardware/neuralnetworks/1.0/types.h>
22#include <android/hardware/neuralnetworks/1.1/IDevice.h>
23#include <android/hidl/allocator/1.0/IAllocator.h>
24#include <android/hidl/memory/1.0/IMemory.h>
25#include <hidlmemory/mapping.h>
26
Xusong Wang81611962019-08-09 16:41:16 -070027#include <gtest/gtest.h>
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010028#include <iostream>
29
30#include "1.0/Callbacks.h"
31#include "1.0/Utils.h"
32#include "MemoryUtils.h"
33#include "TestHarness.h"
34
35namespace android {
36namespace hardware {
37namespace neuralnetworks {
Slava Shklyaeve8b24462019-07-17 15:50:57 +010038namespace V1_1 {
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010039namespace generated_tests {
40
Xusong Wang81611962019-08-09 16:41:16 -070041using namespace test_helper;
42using ::android::hardware::neuralnetworks::V1_0::DataLocation;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010043using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
44using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
Xusong Wang81611962019-08-09 16:41:16 -070045using ::android::hardware::neuralnetworks::V1_0::Operand;
46using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
47using ::android::hardware::neuralnetworks::V1_0::OperandType;
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010048using ::android::hardware::neuralnetworks::V1_0::Request;
49using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
50using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
51using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
52using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
53using ::android::hardware::neuralnetworks::V1_1::IDevice;
54using ::android::hardware::neuralnetworks::V1_1::Model;
55using ::android::hidl::memory::V1_0::IMemory;
Xusong Wang81611962019-08-09 16:41:16 -070056
// Converts a test_helper::TestModel into a HIDL V1_1::Model.
// Operand data with lifetime CONSTANT_COPY is packed into the model's
// operandValues blob; CONSTANT_REFERENCE data is placed into a single shared
// memory pool (pools[0]). Offsets are accumulated using alignedSize() so each
// constant starts at a properly aligned offset within its backing store.
Model createModel(const TestModel& testModel) {
    // Model operands.
    // First pass: build the operand descriptors and compute the total sizes
    // of the two constant backing stores.
    hidl_vec<Operand> operands(testModel.operands.size());
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        // Location stays zero-initialized for non-constant lifetimes
        // (inputs, outputs, temporaries).
        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            // Advance by the aligned size, not the raw size, so the next
            // constant remains aligned.
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc};
    }

    // Model operations: a direct field-by-field translation of each
    // TestOperation into a HIDL Operation.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies: second pass copies CONSTANT_COPY data into the
    // operandValues blob at the offsets recorded above.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory: allocate one pool for all CONSTANT_REFERENCE operands
    // and copy their data in through a temporary mapping. Skipped entirely
    // when the model has no such operands.
    hidl_vec<hidl_memory> pools;
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100137
138// Top level driver for models and examples generated by test_generator.py
139// Test driver for those generated from ml/nn/runtime/test/spec
Xusong Wang81611962019-08-09 16:41:16 -0700140void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
141 const Request request = createRequest(testModel);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100142
Xusong Wang81611962019-08-09 16:41:16 -0700143 // Launch execution.
144 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
145 Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
146 ASSERT_TRUE(executionLaunchStatus.isOk());
147 EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100148
Xusong Wang81611962019-08-09 16:41:16 -0700149 // Retrieve execution status.
150 executionCallback->wait();
151 ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100152
Xusong Wang81611962019-08-09 16:41:16 -0700153 // Retrieve execution results.
154 const std::vector<TestBuffer> outputs = getOutputBuffers(request);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100155
Xusong Wang81611962019-08-09 16:41:16 -0700156 // We want "close-enough" results.
157 checkResults(testModel, outputs);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100158}
159
Xusong Wang81611962019-08-09 16:41:16 -0700160void Execute(const sp<IDevice>& device, const TestModel& testModel) {
161 Model model = createModel(testModel);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100162
163 // see if service can handle model
164 bool fullySupportsModel = false;
165 Return<void> supportedCall = device->getSupportedOperations_1_1(
166 model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
167 ASSERT_EQ(ErrorStatus::NONE, status);
168 ASSERT_NE(0ul, supported.size());
169 fullySupportsModel = std::all_of(supported.begin(), supported.end(),
170 [](bool valid) { return valid; });
171 });
172 ASSERT_TRUE(supportedCall.isOk());
173
174 // launch prepare model
175 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100176 Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
177 model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
178 ASSERT_TRUE(prepareLaunchStatus.isOk());
179 ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
180
181 // retrieve prepared model
182 preparedModelCallback->wait();
183 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
184 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
185
186 // early termination if vendor service cannot fully prepare model
187 if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
188 ASSERT_EQ(nullptr, preparedModel.get());
189 LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
190 "prepare model that it does not support.";
191 std::cout << "[ ] Early termination of test because vendor service cannot "
192 "prepare model that it does not support."
193 << std::endl;
194 GTEST_SKIP();
195 }
196 EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
197 ASSERT_NE(nullptr, preparedModel.get());
198
Xusong Wang81611962019-08-09 16:41:16 -0700199 EvaluatePreparedModel(preparedModel, testModel);
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100200}
201
202} // namespace generated_tests
Slava Shklyaeve8b24462019-07-17 15:50:57 +0100203} // namespace V1_1
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100204} // namespace neuralnetworks
205} // namespace hardware
206} // namespace android