blob: e7d59eca912dc55e836ba8ac02c65098d8dbf1fb [file] [log] [blame]
Slava Shklyaev73ee79d2019-05-14 14:15:14 +01001/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "GeneratedTestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
33
34namespace android {
35namespace hardware {
36namespace neuralnetworks {
Slava Shklyaeve8b24462019-07-17 15:50:57 +010037namespace V1_1 {
Slava Shklyaev73ee79d2019-05-14 14:15:14 +010038namespace generated_tests {
39
40using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
41using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
42using ::android::hardware::neuralnetworks::V1_0::Request;
43using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
44using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
45using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
46using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
47using ::android::hardware::neuralnetworks::V1_1::IDevice;
48using ::android::hardware::neuralnetworks::V1_1::Model;
49using ::android::hidl::memory::V1_0::IMemory;
50using ::test_helper::compare;
51using ::test_helper::filter;
52using ::test_helper::for_all;
53using ::test_helper::MixedTyped;
54using ::test_helper::MixedTypedExample;
55using ::test_helper::resize_accordingly;
56
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
//
// Runs every MixedTypedExample against |preparedModel|: packs the example's
// inputs into one shared-memory pool and reserves a second pool for outputs,
// builds a Request describing both, launches an asynchronous execution, waits
// for its callback, then reads the outputs back and compares them against the
// example's golden values. |is_ignored| selects output operands whose values
// are "don't care" and are excluded from comparison. |fpAtol|/|fpRtol| are the
// absolute/relative float tolerances; they are widened to 5 ULP of FP16 when
// the model is relaxed-FP32 or the example carries FP16 inputs.
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol) {
    // Pool indices within the Request: pool 0 holds all inputs, pool 1 all outputs.
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    int example_no = 1;
    for (auto& example : examples) {
        // Tag gtest failures with the 1-based example number.
        SCOPED_TRACE(example_no++);
        const MixedTyped& inputs = example.operands.first;
        const MixedTyped& golden = example.operands.second;

        const bool hasFloat16Inputs = !inputs.float16Operands.empty();
        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
            // TODO: Adjust the error limit based on testing.
            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
            fpAtol = 5.0f * 0.0009765625f;
            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
            fpRtol = 5.0f * 0.0009765625f;
            // NOTE(review): fpAtol/fpRtol are by-value parameters mutated here, so once
            // widened for one example they remain widened for all later examples in this
            // loop — confirm this is intended when examples mix FP16 and FP32 inputs.
        }

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;
        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                    .location = {.poolIndex = INPUT,
                                 .offset = 0,
                                 .length = static_cast<uint32_t>(s)},
                    .dimensions = {},
            };
            // A zero-sized operand carries no value: mark it hasNoValue rather
            // than pointing it into the pool.
            RequestArgument arg_empty = {
                    .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        // (second pass: lay operands out back-to-back in the input pool).
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        MixedTyped test;  // holding test results

        // Go through all outputs, initialize RequestArgument descriptors
        resize_accordingly(golden, test);
        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                    .location = {.poolIndex = OUTPUT,
                                 .offset = 0,
                                 .length = static_cast<uint32_t>(s)},
                    .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        // (outputs are laid out back-to-back in the output pool).
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        // One shared-memory pool for all inputs and one for all outputs.
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        ASSERT_NE(0ull, pools[INPUT].size());
        ASSERT_NE(0ull, pools[OUTPUT].size());

        // load data
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
        ASSERT_NE(nullptr, inputMemory.get());
        ASSERT_NE(nullptr, outputMemory.get());
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
        ASSERT_NE(nullptr, inputPtr);
        ASSERT_NE(nullptr, outputPtr);
        // HIDL IMemory protocol: update() opens a CPU write window; commit()
        // (below, after the copies) closes it.
        inputMemory->update();
        outputMemory->update();

        // Go through all inputs, copy the values
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });

        inputMemory->commit();
        outputMemory->commit();

        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};

        // launch execution
        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
        ASSERT_NE(nullptr, executionCallback.get());
        Return<ErrorStatus> executionLaunchStatus =
                preparedModel->execute(request, executionCallback);
        // isOk() checks the HIDL transport; the returned status checks the launch itself.
        ASSERT_TRUE(executionLaunchStatus.isOk());
        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

        // retrieve execution status
        executionCallback->wait();
        ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());

        // validate results
        // read()/commit() bracket the CPU read of the driver-written output pool.
        outputMemory->read();
        copy_back(&test, outputs_info, outputPtr);
        outputMemory->commit();
        // Filter out don't cares
        MixedTyped filtered_golden = filter(golden, is_ignored);
        MixedTyped filtered_test = filter(test, is_ignored);

        // We want "close-enough" results for float
        compare(filtered_golden, filtered_test, fpAtol, fpRtol);
    }
}
184
185void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
186 std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
187 Model model = create_model();
188
189 // see if service can handle model
190 bool fullySupportsModel = false;
191 Return<void> supportedCall = device->getSupportedOperations_1_1(
192 model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
193 ASSERT_EQ(ErrorStatus::NONE, status);
194 ASSERT_NE(0ul, supported.size());
195 fullySupportsModel = std::all_of(supported.begin(), supported.end(),
196 [](bool valid) { return valid; });
197 });
198 ASSERT_TRUE(supportedCall.isOk());
199
200 // launch prepare model
201 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
202 ASSERT_NE(nullptr, preparedModelCallback.get());
203 Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
204 model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
205 ASSERT_TRUE(prepareLaunchStatus.isOk());
206 ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
207
208 // retrieve prepared model
209 preparedModelCallback->wait();
210 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
211 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
212
213 // early termination if vendor service cannot fully prepare model
214 if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
215 ASSERT_EQ(nullptr, preparedModel.get());
216 LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
217 "prepare model that it does not support.";
218 std::cout << "[ ] Early termination of test because vendor service cannot "
219 "prepare model that it does not support."
220 << std::endl;
221 GTEST_SKIP();
222 }
223 EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
224 ASSERT_NE(nullptr, preparedModel.get());
225
226 EvaluatePreparedModel(preparedModel, is_ignored, examples,
227 model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f);
228}
229
230} // namespace generated_tests
Slava Shklyaeve8b24462019-07-17 15:50:57 +0100231} // namespace V1_1
Slava Shklyaev73ee79d2019-05-14 14:15:14 +0100232} // namespace neuralnetworks
233} // namespace hardware
234} // namespace android