/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Utils.h"

#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
#include <aidl/android/hardware/neuralnetworks/Operand.h>
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
#include <android-base/logging.h>
#include <android/binder_status.h>
#include <android/hardware_buffer.h>

#include <iostream>
#include <limits>
#include <numeric>

#include <MemoryUtils.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/hal/aidl/Conversions.h>
#include <nnapi/hal/aidl/Utils.h>

namespace aidl::android::hardware::neuralnetworks {

using test_helper::TestBuffer;
using test_helper::TestModel;

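// Returns the size in bytes of one element of the given operand type (or of the scalar value
// itself for scalar types). SUBGRAPH operands carry no data, so their size is reported as 0.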
uint32_t sizeOfData(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
            return 4;
        case OperandType::TENSOR_QUANT16_SYMM:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::FLOAT16:
        case OperandType::TENSOR_QUANT16_ASYMM:
            return 2;
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::BOOL:
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return 1;
        case OperandType::SUBGRAPH:
            return 0;
        default:
            CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
            return 0;
    }
}

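// Returns whether the operand type is a tensor type, i.e. whether its total size depends on the
// operand's dimensions, as opposed to a scalar or SUBGRAPH type.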
static bool isTensor(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::FLOAT16:
        case OperandType::BOOL:
        case OperandType::SUBGRAPH:
            return false;
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT16_SYMM:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return true;
        default:
            CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
            return false;
    }
}

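// Returns the total size in bytes of an operand: the element size multiplied by all of its
// dimensions. A tensor operand with an empty dimension list (unspecified shape) is reported as 0.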
uint32_t sizeOfData(const Operand& operand) {
    const uint32_t dataSize = sizeOfData(operand.type);
    if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
    return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
                           std::multiplies<>{});
}

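// Factory that returns a fully initialized TestAshmem of the requested size, or nullptr if the
// allocation or mapping failed.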
std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
    auto ashmem = std::make_unique<TestAshmem>(size);
    return ashmem->mIsValid ? std::move(ashmem) : nullptr;
}

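// Allocates the shared memory region, maps it into this process, and converts it into the AIDL
// Memory representation. mIsValid is only set to true once every step has succeeded.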
void TestAshmem::initialize(uint32_t size) {
    mIsValid = false;
    ASSERT_GT(size, 0);
    const auto sharedMemory = nn::createSharedMemory(size).value();
    mMappedMemory = nn::map(sharedMemory).value();
    mPtr = static_cast<uint8_t*>(std::get<void*>(mMappedMemory.pointer));
    CHECK_NE(mPtr, nullptr);
    mAidlMemory = utils::convert(sharedMemory).value();
    mIsValid = true;
}

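// Factory that returns a fully initialized TestBlobAHWB of the requested size, or nullptr if the
// allocation or mapping failed.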
std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
    auto ahwb = std::make_unique<TestBlobAHWB>(size);
    return ahwb->mIsValid ? std::move(ahwb) : nullptr;
}

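// Allocates a CPU-readable and CPU-writable BLOB AHardwareBuffer of the requested size, wraps it
// in a shared memory object, maps it, and converts it into the AIDL Memory representation.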
void TestBlobAHWB::initialize(uint32_t size) {
    mIsValid = false;
    ASSERT_GT(size, 0);
    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
    const AHardwareBuffer_Desc desc = {
            .width = size,
            .height = 1,
            .layers = 1,
            .format = AHARDWAREBUFFER_FORMAT_BLOB,
            .usage = usage,
            .stride = size,
    };

    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
    ASSERT_NE(mAhwb, nullptr);

    const auto sharedMemory = nn::createSharedMemoryFromAHWB(*mAhwb).value();
    mMapping = nn::map(sharedMemory).value();
    mPtr = static_cast<uint8_t*>(std::get<void*>(mMapping.pointer));
    CHECK_NE(mPtr, nullptr);
    mAidlMemory = utils::convert(sharedMemory).value();

    mIsValid = true;
}

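// Unlocks and releases the underlying AHardwareBuffer, if one was allocated.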
TestBlobAHWB::~TestBlobAHWB() {
    if (mAhwb) {
        AHardwareBuffer_unlock(mAhwb, nullptr);
        AHardwareBuffer_release(mAhwb);
    }
}

std::string gtestCompliantName(std::string name) {
    // gtest test names may only contain alphanumeric characters and underscores, so replace
    // everything else with '_'.
    std::replace_if(
            name.begin(), name.end(), [](char c) { return !std::isalnum(c); }, '_');
    return name;
}

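// Stream operator so that gtest failure messages print ErrorStatus values by name rather than as
// raw integers.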
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

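// Builds a Request for the given TestModel: lays out every input and output operand in two memory
// pools (one for inputs, one for outputs) backed by either ashmem or a BLOB AHardwareBuffer, and
// copies the test input data into the input pool.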
Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
    CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);

    // Model inputs.
    std::vector<RequestArgument> inputs(testModel.main.inputIndexes.size());
    size_t inputSize = 0;
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
        if (op.data.size() == 0) {
            // Omitted input.
            inputs[i] = {.hasNoValue = true};
        } else {
            DataLocation loc = {.poolIndex = kInputPoolIndex,
                                .offset = static_cast<int64_t>(inputSize),
                                .length = static_cast<int64_t>(op.data.size())};
            inputSize += op.data.alignedSize();
            inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
        }
    }

    // Model outputs.
    std::vector<RequestArgument> outputs(testModel.main.outputIndexes.size());
    size_t outputSize = 0;
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the driver, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        size_t bufferSize = std::max<size_t>(op.data.size(), 1);

        DataLocation loc = {.poolIndex = kOutputPoolIndex,
                            .offset = static_cast<int64_t>(outputSize),
                            .length = static_cast<int64_t>(bufferSize)};
        outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
        outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
    }

    // Allocate memory pools.
    if (memoryType == MemoryType::ASHMEM) {
        mInputMemory = TestAshmem::create(inputSize);
        mOutputMemory = TestAshmem::create(outputSize);
    } else {
        mInputMemory = TestBlobAHWB::create(inputSize);
        mOutputMemory = TestBlobAHWB::create(outputSize);
    }
    CHECK_NE(mInputMemory, nullptr);
    CHECK_NE(mOutputMemory, nullptr);

    auto copiedInputMemory = utils::clone(*mInputMemory->getAidlMemory());
    CHECK(copiedInputMemory.has_value()) << copiedInputMemory.error().message;
    auto copiedOutputMemory = utils::clone(*mOutputMemory->getAidlMemory());
    CHECK(copiedOutputMemory.has_value()) << copiedOutputMemory.error().message;

    std::vector<RequestMemoryPool> pools;
    pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
            std::move(copiedInputMemory).value()));
    pools.push_back(RequestMemoryPool::make<RequestMemoryPool::Tag::pool>(
            std::move(copiedOutputMemory).value()));

    // Copy input data to the memory pool.
    uint8_t* inputPtr = mInputMemory->getPointer();
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
        if (op.data.size() > 0) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, inputPtr + inputs[i].location.offset);
        }
    }

    return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}

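// Reads each output region of the request back out of the output memory pool and returns the
// contents as TestBuffers, in the same order as request.outputs.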
std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
    // Copy out output results.
    uint8_t* outputPtr = mOutputMemory->getPointer();
    std::vector<TestBuffer> outputBuffers;
    for (const auto& output : request.outputs) {
        outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
    }
    return outputBuffers;
}

}  // namespace aidl::android::hardware::neuralnetworks