/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <vector>

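// Test helpers for the neuralnetworks V1_0 VTS suite: a small, well-formed ADD
// model and matching request, plus deliberately malformed variants used by the
// negative tests.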
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create a valid model: a single ADD operation that adds the model input
// (operand1) to a constant tensor {5, 6, 7, 8} (operand2), with the fused
// activation (operand3) set to NONE, producing the model output (operand4)
Model createValidTestModel() {
    const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
    const uint32_t size = operand2Data.size() * sizeof(float);

    const uint32_t operand1 = 0;
    const uint32_t operand2 = 1;
    const uint32_t operand3 = 2;
    const uint32_t operand4 = 3;

    const std::vector<Operand> operands = {
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = 0, .length = size},
        },
        {
            .type = OperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
        },
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
    };

    const std::vector<Operation> operations = {{
        .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
        .inputs = {operand1, operand2, operand3},
        .outputs = {operand4},
    }};

    const std::vector<uint32_t> inputIndexes = {operand1};
    const std::vector<uint32_t> outputIndexes = {operand4};
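    // operandValues holds the raw bytes backing the CONSTANT_COPY operands:
    // the constant tensor (operand2) at offset 0 followed by the activation
    // scalar (operand3) at offset `size`, matching the .location fields above.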
    std::vector<uint8_t> operandValues(
        reinterpret_cast<const uint8_t*>(operand2Data.data()),
        reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
    int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
    operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
                         reinterpret_cast<const uint8_t*>(&activation[1]));

    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}
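
// A minimal sketch of how a test might consume the model above (illustrative
// only; the actual service name and callback class come from the VTS harness,
// not from this file):
//
//     sp<IDevice> device = IDevice::getService(name);  // name = driver instance under test
//     sp<PreparedModelCallback> callback = new PreparedModelCallback();
//     Return<ErrorStatus> status = device->prepareModel(createValidTestModel(), callback);
//     callback->wait();
//     sp<IPreparedModel> preparedModel = callback->getPreparedModel();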

// create an invalid model: the operation type is out of range
Model createInvalidTestModel1() {
    Model model = createValidTestModel();
    model.operations[0].opTuple = {static_cast<OperationType>(0xDEADBEEF) /* INVALID */,
                                   OperandType::TENSOR_FLOAT32};
    return model;
}

// create an invalid model: inputIndexes references operand 4, which does not
// exist (the model only defines operands 0 through 3)
Model createInvalidTestModel2() {
    Model model = createValidTestModel();
    const uint32_t operand1 = 0;
    const uint32_t operand5 = 4;  // INVALID OPERAND
    model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
    return model;
}
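
// The invalid models above are consumed by the negative VTS tests; a conformant
// driver is expected to reject them during validation rather than prepare them.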

// allocator helper: allocates a shared memory region of the given size from the
// named allocator service (ashmem by default); returns an empty hidl_memory on
// failure
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
    hidl_memory memory;

    sp<IAllocator> allocator = IAllocator::getService(type);
    if (!allocator.get()) {
        return {};
    }

    Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        ASSERT_TRUE(success);
        memory = mem;
    });
    if (!ret.isOk()) {
        return {};
    }

    return memory;
}

// create a valid request for the model above: the input pool holds {1, 2, 3, 4}
// and the output pool is initialized to -1 sentinels; each argument is backed by
// its own shared memory pool
Request createValidTestRequest() {
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    // prepare input and output arguments and their memory pools
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
    std::vector<RequestArgument> inputs = {{
        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
    }};
    std::vector<RequestArgument> outputs = {{
        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
    }};
    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                      allocateSharedMemory(outputSize)};
    if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
        return {};
    }

    // load data
    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
    if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
        return {};
    }
    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
    if (inputPtr == nullptr || outputPtr == nullptr) {
        return {};
    }
    inputMemory->update();
    outputMemory->update();
    std::copy(inputData.begin(), inputData.end(), inputPtr);
    std::copy(outputData.begin(), outputData.end(), outputPtr);
    inputMemory->commit();
    outputMemory->commit();

    return {.inputs = inputs, .outputs = outputs, .pools = pools};
}
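
// A minimal sketch of how a test might check the output of the request above
// after execution (illustrative only; the real checks live in the VTS test
// .cpp files). For the ADD model, {1, 2, 3, 4} + {5, 6, 7, 8} should produce
// {6, 8, 10, 12} in the output pool:
//
//     sp<IMemory> outputMemory = mapMemory(request.pools[1]);
//     const float* out = reinterpret_cast<const float*>(
//             static_cast<void*>(outputMemory->getPointer()));
//     // expect out[0..3] == {6.0f, 8.0f, 10.0f, 12.0f}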

// create an invalid request: the input argument points at pool index 2, but the
// request only has two pools (indices 0 and 1)
Request createInvalidTestRequest1() {
    Request request = createValidTestRequest();
    const uint32_t INVALID = 2;
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    request.inputs[0].location = {
        .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
    return request;
}

// create an invalid request: the input argument claims 8 dimensions, which does
// not match the model's 4-D input operand
Request createInvalidTestRequest2() {
    Request request = createValidTestRequest();
    request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
    return request;
}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android