/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17#include "Event.h"
18#include "TestHarness.h"
19#include "VtsHalNeuralnetworksV1_0TargetTest.h"
20
21#include <android-base/logging.h>
22#include <android/hidl/memory/1.0/IMemory.h>
23#include <hidlmemory/mapping.h>
24
25namespace android {
26namespace hardware {
27namespace neuralnetworks {
28namespace V1_0 {
29namespace vts {
30namespace functional {
31// allocator helper
32hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");
33
34namespace generated_tests {
35using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
36using ::generated_tests::for_all;
37using ::generated_tests::for_each;
38using ::generated_tests::resize_accordingly;
39using ::generated_tests::MixedTyped;
40using ::generated_tests::MixedTypedExampleType;
41using ::generated_tests::Float32Operands;
42using ::generated_tests::Int32Operands;
43using ::generated_tests::Quant8Operands;
44// Top level driver for models and examples generated by test_generator.py
45// Test driver for those generated from ml/nn/runtime/test/spec
46void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
47 const std::vector<MixedTypedExampleType>& examples) {
48 Model model = create_model();
49 sp<IPreparedModel> preparedModel;
50 sp<Event> preparationEvent = new Event();
51 ASSERT_NE(nullptr, preparationEvent.get());
52 Return<void> prepareRet = device->prepareModel(
53 model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
54 EXPECT_EQ(ErrorStatus::NONE, status);
55 preparedModel = prepared;
56 });
57 ASSERT_TRUE(prepareRet.isOk());
58 ASSERT_NE(nullptr, preparedModel.get());
59 Event::Status preparationStatus = preparationEvent->wait();
60 EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
61
62 const uint32_t INPUT = 0;
63 const uint32_t OUTPUT = 1;
64
65 int example_no = 1;
66 for (auto& example : examples) {
67 SCOPED_TRACE(example_no++);
68
69 const MixedTyped& inputs = example.first;
70 const MixedTyped& golden = example.second;
71
72 std::vector<RequestArgument> inputs_info, outputs_info;
73 uint32_t inputSize = 0, outputSize = 0;
74
75 // This function only partially specifies the metadata (vector of RequestArguments).
76 // The contents are copied over below.
77 for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
78 if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
79 RequestArgument arg = {
80 .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
81 .dimensions = {},
82 };
83 inputs_info[index] = arg;
84 inputSize += s;
85 });
86 // Compute offset for inputs 1 and so on
87 {
88 size_t offset = 0;
89 for (auto& i : inputs_info) {
90 i.location.offset = offset;
91 offset += i.location.length;
92 }
93 }
94
95 MixedTyped test; // holding test results
96
97 // Go through all outputs, initialize RequestArgument descriptors
98 resize_accordingly<float>(golden, test);
99 resize_accordingly<int32_t>(golden, test);
100 resize_accordingly<uint8_t>(golden, test);
101 for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
102 if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
103 RequestArgument arg = {
104 .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
105 .dimensions = {},
106 };
107 outputs_info[index] = arg;
108 outputSize += s;
109 });
110 // Compute offset for outputs 1 and so on
111 {
112 size_t offset = 0;
113 for (auto& i : outputs_info) {
114 i.location.offset = offset;
115 offset += i.location.length;
116 }
117 }
118 std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
119 allocateSharedMemory(outputSize)};
120 ASSERT_NE(0ull, pools[INPUT].size());
121 ASSERT_NE(0ull, pools[OUTPUT].size());
122
123 // load data
124 sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
125 sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
126 ASSERT_NE(nullptr, inputMemory.get());
127 ASSERT_NE(nullptr, outputMemory.get());
128 char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
129 char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
130 ASSERT_NE(nullptr, inputPtr);
131 ASSERT_NE(nullptr, outputPtr);
132 inputMemory->update();
133 outputMemory->update();
134
135 // Go through all inputs, copy the values
136 for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
137 char* begin = (char*)p;
138 char* end = begin + s;
139 // TODO: handle more than one input
140 std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
141 });
142
143 inputMemory->commit();
144 outputMemory->commit();
145 // execute request
146 sp<Event> executionEvent = new Event();
147 ASSERT_NE(nullptr, executionEvent.get());
148 Return<ErrorStatus> executeStatus = preparedModel->execute(
149 {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionEvent);
150 ASSERT_TRUE(executeStatus.isOk());
151 EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
152 Event::Status eventStatus = executionEvent->wait();
153 EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
154
155 // validate results
156 outputMemory->read();
157#define COPY_BACK(ty) \
158 for_each<ty>(test, [&outputs_info, outputPtr](int index, std::vector<ty>& m) { \
159 RequestArgument& i = outputs_info[index]; \
160 ASSERT_EQ(m.size(), i.location.length / sizeof(ty)); \
161 char* begin = outputPtr + i.location.offset; \
162 memcpy(m.data(), begin, i.location.length); \
163 });
164 COPY_BACK(float);
165 COPY_BACK(int32_t);
166 COPY_BACK(uint8_t);
167#undef COPY_BACK
168 outputMemory->commit();
169 // We want "close-enough" results for float
170 for_each<float>(golden, [&test](int index, auto& golden_float) {
171 auto& test_float_operands = std::get<Float32Operands>(test);
172 auto& test_float = test_float_operands[index];
173 for (unsigned int i = 0; i < golden_float.size(); i++) {
174 SCOPED_TRACE(i);
I-Jui (Ray) Sung773369a2017-09-13 16:47:50 -0700175 EXPECT_NEAR(golden_float[i], test_float[i], 1.e-5);
I-Jui (Ray) Sung2c4e1362017-09-06 02:15:54 -0700176 }
177 });
178
179 EXPECT_EQ(std::get<Int32Operands>(golden), std::get<Int32Operands>(test));
180 EXPECT_EQ(std::get<Quant8Operands>(golden), std::get<Quant8Operands>(test));
181 }
182}
183
184} // namespace generated_tests
185
186} // namespace functional
187} // namespace vts
188} // namespace V1_0
189} // namespace neuralnetworks
190} // namespace hardware
191} // namespace android