/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Conversions.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/CommonUtils.h>

#include <algorithm>
#include <functional>
#include <iterator>
#include <type_traits>
#include <utility>

namespace android::nn {
namespace {

using hardware::hidl_vec;

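// Helpers for converting a hidl_vec of HAL types element-by-element. convertOutput deduces the
// canonical type that convert() produces for a given HAL input type, and NN_TRY propagates the
// first conversion failure.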
template <typename Input>
using convertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
Result<std::vector<convertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
    std::vector<convertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::convert(argument)));
    }
    return canonical;
}

}  // anonymous namespace

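// The V1_1 and canonical OperationType enums use matching numeric values, so the conversion is a
// direct static_cast.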
Result<OperationType> convert(const hal::V1_1::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

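// V1_1 Capabilities holds one performance value for all quantized8 operations and one for
// float32. makeQuantized8PerformanceConsistentWithP expands these into the per-operand-type
// performance table used by the canonical Capabilities, matching how Android P interprets
// pre-P capabilities.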
Result<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
    const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance));
    const auto float32Performance = NN_TRY(convert(capabilities.float32Performance));
    const auto relaxedFloat32toFloat16Performance =
            NN_TRY(convert(capabilities.relaxedFloat32toFloat16Performance));

    auto table = hal::utils::makeQuantized8PerformanceConsistentWithP(float32Performance,
                                                                      quantized8Performance);

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16Performance,
            .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16Performance,
            .operandPerformance = std::move(table),
    };
}

Result<Operation> convert(const hal::V1_1::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

Result<Model> convert(const hal::V1_1::Model& model) {
    auto operations = NN_TRY(convert(model.operations));

    // Verify number of consumers. The canonical Model does not store numberOfConsumers, so check
    // that the value reported by the HAL model matches the count derived from the operations.
    const auto numberOfConsumers =
            hal::utils::countNumberOfConsumers(model.operands.size(), operations);
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
                              << numberOfConsumers[i] << " but found "
                              << model.operands[i].numberOfConsumers;
        }
    }

    auto main = Model::Subgraph{
            .operands = NN_TRY(convert(model.operands)),
            .operations = std::move(operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
    };

    return Model{
            .main = std::move(main),
            .operandValues = NN_TRY(convert(model.operandValues)),
            .pools = NN_TRY(convert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
    };
}

Result<ExecutionPreference> convert(const hal::V1_1::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

}  // namespace android::nn

namespace android::hardware::neuralnetworks::V1_1::utils {
namespace {

using utils::convert;

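// V1_1 reuses the V1_0 HIDL types for performance info, operands, operand values, and memory
// pools, so these helpers delegate to the V1_0 conversion utilities.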
nn::Result<V1_0::PerformanceInfo> convert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::convert(performanceInfo);
}

nn::Result<V1_0::Operand> convert(const nn::Operand& operand) {
    return V1_0::utils::convert(operand);
}

nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::convert(operandValues);
}

nn::Result<hidl_memory> convert(const nn::Memory& memory) {
    return V1_0::utils::convert(memory);
}

template <typename Input>
using convertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
nn::Result<hidl_vec<convertOutput<Type>>> convert(const std::vector<Type>& arguments) {
    hidl_vec<convertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(convert(arguments[i]));
    }
    return halObject;
}

}  // anonymous namespace

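// As in the HAL-to-canonical direction above, the enum values match, so a static_cast suffices.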
nn::Result<OperationType> convert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

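// V1_1 Capabilities can only describe float32 and asymmetric quantized8 performance, so the
// canonical per-operand-type table is narrowed to those two entries; the remaining entries are
// dropped.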
nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
    return Capabilities{
            .float32Performance = NN_TRY(convert(
                    capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),
            .quantized8Performance = NN_TRY(convert(
                    capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))),
            .relaxedFloat32toFloat16Performance =
                    NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
    };
}

nn::Result<Operation> convert(const nn::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

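// Canonical models may reference operand data by pointer, which HIDL cannot represent. Callers
// are expected to copy any pointer-based data into shared memory before converting.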
nn::Result<Model> convert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(convert(model.main.operands));

    // Update number of consumers. The canonical Model does not store numberOfConsumers, so
    // recompute it from the operations and write it back into the converted operands.
    const auto numberOfConsumers =
            hal::utils::countNumberOfConsumers(operands.size(), model.main.operations);
    CHECK(operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < operands.size(); ++i) {
        operands[i].numberOfConsumers = numberOfConsumers[i];
    }

    return Model{
            .operands = std::move(operands),
            .operations = NN_TRY(convert(model.main.operations)),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = NN_TRY(convert(model.operandValues)),
            .pools = NN_TRY(convert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
    };
}

nn::Result<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

}  // namespace android::hardware::neuralnetworks::V1_1::utils