/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using generated_tests::MixedTyped;
using generated_tests::MixedTypedExampleType;
using generated_tests::for_all;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that are
    // guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation,
    // it must return false. So here, if a driver isn't sure whether it can
    // support an operation, but reports that it successfully prepared the
    // model, the test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
// mutation to the request does not leave this function.
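// A request invalidated this way must be rejected: both the synchronous launch
// status returned by execute() and the status delivered through the execution
// callback are expected to be INVALID_ARGUMENT.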
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
                     Request request, const std::function<void(Request*)>& mutation) {
    mutation(&request);
    SCOPED_TRACE(message + " [execute]");

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
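// For example, hidl_vec_removeAt(&vec, 1) on a hidl_vec holding {a, b, c}
// rotates 'b' to the back and then shrinks the vec to {a, c}.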
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    if (vec) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
}

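// Append an element to a hidl_vec. hidl_vec doesn't support a "push back"
// operation, so this is accomplished by resizing the hidl_vec by one and
// writing the new value into the last slot; the index of the new element is
// returned.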
template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    // assume vec is valid
    const uint32_t index = vec->size();
    vec->resize(index + 1);
    (*vec)[index] = value;
    return index;
}

///////////////////////// REMOVE INPUT ////////////////////////////////////

static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t input = 0; input < request.inputs.size(); ++input) {
        const std::string message = "removeInput: removed input " + std::to_string(input);
        validate(preparedModel, message, request,
                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
    }
}

///////////////////////// REMOVE OUTPUT ////////////////////////////////////

static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t output = 0; output < request.outputs.size(); ++output) {
        const std::string message = "removeOutput: removed Output " + std::to_string(output);
        validate(preparedModel, message, request,
                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
    }
}

///////////////////////////// ENTRY POINT //////////////////////////////////

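// Convert each generated MixedTyped example into a Request backed by two
// shared memory pools: pool 0 holds the concatenated input operands and
// pool 1 is reserved for the outputs.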
std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
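        // (e.g. inputs with lengths {4, 8, 2} end up packed back-to-back at
        //  offsets {0, 4, 12} in the input pool)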
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
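        // Give up and return no requests if either shared memory allocation failed.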
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

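        // IMemory::update() must be called before writing directly into the mapped
        // region, and commit() once the writes are done; the loop below copies each
        // example's input data to the offsets computed above.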
        // initialize pool
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}

void ValidationTest::validateRequests(const V1_1::Model& model,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_1
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android