/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_2 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

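// Prepare a model for execution. Queries getSupportedOperations_1_2 to learn
// whether the driver claims full support for the model, launches
// prepareModel_1_2, and waits on the callback. On success *preparedModel holds
// the prepared model; if the driver legitimately cannot prepare a model it does
// not fully support, *preparedModel is left null and the caller skips the test.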
static void createPreparedModel(const sp<IDevice>& device, const Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_2(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = getPreparedModel_1_2(preparedModelCallback);

    // The getSupportedOperations_1_2 call returns a list of operations that are
    // guaranteed not to fail if prepareModel_1_2 is called, and
    // 'fullySupportsModel' is true iff the entire model is guaranteed to be
    // supported. If a driver has any doubt that it can prepare an operation, it
    // must return false. So here, if a driver isn't sure whether it can support
    // an operation but nevertheless reports that it successfully prepared the
    // model, the test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
// mutation to the request does not leave this function.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
                     Request request, const std::function<void(Request*)>& mutation) {
    mutation(&request);

    {
        SCOPED_TRACE(message + " [execute_1_2]");

        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
        ASSERT_NE(nullptr, executionCallback.get());
        Return<ErrorStatus> executeLaunchStatus =
            preparedModel->execute_1_2(request, executionCallback);
        ASSERT_TRUE(executeLaunchStatus.isOk());
        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

        executionCallback->wait();
        ErrorStatus executionReturnStatus = executionCallback->getStatus();
        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
    }

    {
        SCOPED_TRACE(message + " [executeSynchronously]");

        Return<ErrorStatus> executeStatus = preparedModel->executeSynchronously(request);
        ASSERT_TRUE(executeStatus.isOk());
        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
    }
}

// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    if (vec) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
}

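// Append an element to a hidl_vec. hidl_vec doesn't support a "push_back"
// operation, so this is accomplished by growing the hidl_vec by one and
// assigning the value to the last slot; the index of the new element is
// returned to the caller.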
template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    // assume vec is valid
    const uint32_t index = vec->size();
    vec->resize(index + 1);
    (*vec)[index] = value;
    return index;
}

///////////////////////// REMOVE INPUT ////////////////////////////////////

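// For each input in the request, check that dropping that input's
// RequestArgument invalidates the request: the argument count no longer matches
// the model, so every execution path must report INVALID_ARGUMENT.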
static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t input = 0; input < request.inputs.size(); ++input) {
        const std::string message = "removeInput: removed input " + std::to_string(input);
        validate(preparedModel, message, request,
                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
    }
}

///////////////////////// REMOVE OUTPUT ////////////////////////////////////

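// Same as above, but drop each output's RequestArgument in turn; the driver
// must likewise reject the malformed request with INVALID_ARGUMENT.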
static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t output = 0; output < request.outputs.size(); ++output) {
        const std::string message = "removeOutput: removed Output " + std::to_string(output);
        validate(preparedModel, message, request,
                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
    }
}

///////////////////////////// ENTRY POINT //////////////////////////////////

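// Build a Request for each golden example from the test harness. All inputs are
// packed back-to-back into one shared memory pool and all outputs into a second
// pool; each RequestArgument records its operand's offset and length within
// those pools. Returns an empty vector if memory allocation or mapping fails.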
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.operands.first;
        const MixedTyped& outputs = example.operands.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
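        // Back the request with two shared memory pools: one sized to hold every
        // input and one sized to hold every output. If either allocation fails,
        // request creation is abandoned.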
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}

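// Entry point used by the generated validation tests: prepare the model once,
// then run every negative request test against each golden request. If the
// vendor service cannot prepare the model because it does not fully support it,
// validation is skipped.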
void ValidationTest::validateRequests(const Model& model, const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_2
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android