/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Conversions.h"

#include <aidl/android/hardware/common/NativeHandle.h>
#include <android-base/logging.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <iterator>
#include <limits>
#include <type_traits>
#include <utility>
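
// Returns from the enclosing function with NN_ERROR() when `value` is negative. A `while` loop is
// used instead of `if` so that the macro composes with a trailing `<<` for extra error context and
// avoids the dangling-else problem; the body runs at most once because it returns. Example usage:
//   VERIFY_NON_NEGATIVE(location.offset) << "offset must not be negative";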
#define VERIFY_NON_NEGATIVE(value) \
    while (UNLIKELY(value < 0)) return NN_ERROR()

namespace {

template <typename Type>
constexpr std::underlying_type_t<Type> underlyingType(Type value) {
    return static_cast<std::underlying_type_t<Type>>(value);
}
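
// Everything converted here is capped at the Android S feature level: validatedConvert() rejects
// any value whose validation reports a version newer than kVersion.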
constexpr auto kVersion = android::nn::Version::ANDROID_S;

}  // namespace

namespace android::nn {
namespace {

constexpr auto validOperandType(nn::OperandType operandType) {
    switch (operandType) {
        case nn::OperandType::FLOAT32:
        case nn::OperandType::INT32:
        case nn::OperandType::UINT32:
        case nn::OperandType::TENSOR_FLOAT32:
        case nn::OperandType::TENSOR_INT32:
        case nn::OperandType::TENSOR_QUANT8_ASYMM:
        case nn::OperandType::BOOL:
        case nn::OperandType::TENSOR_QUANT16_SYMM:
        case nn::OperandType::TENSOR_FLOAT16:
        case nn::OperandType::TENSOR_BOOL8:
        case nn::OperandType::FLOAT16:
        case nn::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case nn::OperandType::TENSOR_QUANT16_ASYMM:
        case nn::OperandType::TENSOR_QUANT8_SYMM:
        case nn::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case nn::OperandType::SUBGRAPH:
            return true;
        case nn::OperandType::OEM:
        case nn::OperandType::TENSOR_OEM_BYTE:
            return false;
    }
    return nn::isExtension(operandType);
}

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
    }
    return canonical;
}

template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    return unvalidatedConvertVec(arguments);
}
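
// Converts the HAL object to its canonical form, then verifies that the canonical form validates
// at a version no newer than kVersion. The public convert() functions below use this wrapper for
// top-level types; nested types go through unvalidatedConvert() directly.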
template <typename Type>
GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
    const auto maybeVersion = validate(canonical);
    if (!maybeVersion.has_value()) {
        return error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > kVersion) {
        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
    }
    return canonical;
}

template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(validatedConvert(argument)));
    }
    return canonical;
}

}  // anonymous namespace

GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
    VERIFY_NON_NEGATIVE(underlyingType(operandType)) << "Negative operand types are not allowed.";
    return static_cast<OperandType>(operandType);
}

GeneralResult<OperationType> unvalidatedConvert(const aidl_hal::OperationType& operationType) {
    VERIFY_NON_NEGATIVE(underlyingType(operationType))
            << "Negative operation types are not allowed.";
    return static_cast<OperationType>(operationType);
}

GeneralResult<DeviceType> unvalidatedConvert(const aidl_hal::DeviceType& deviceType) {
    return static_cast<DeviceType>(deviceType);
}

GeneralResult<Priority> unvalidatedConvert(const aidl_hal::Priority& priority) {
    return static_cast<Priority>(priority);
}

GeneralResult<Capabilities> unvalidatedConvert(const aidl_hal::Capabilities& capabilities) {
    const bool validOperandTypes = std::all_of(
            capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
            [](const aidl_hal::OperandPerformance& operandPerformance) {
                const auto maybeType = unvalidatedConvert(operandPerformance.type);
                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
            });
    if (!validOperandTypes) {
        return NN_ERROR() << "Invalid OperandType when converting OperandPerformance in "
                             "Capabilities";
    }

    auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
    auto table = NN_TRY(hal::utils::makeGeneralFailure(
            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
            nn::ErrorStatus::GENERAL_FAILURE));

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
            .operandPerformance = std::move(table),
            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
    };
}

GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
        const aidl_hal::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
    };
}

GeneralResult<Capabilities::PerformanceInfo> unvalidatedConvert(
        const aidl_hal::PerformanceInfo& performanceInfo) {
    return Capabilities::PerformanceInfo{
            .execTime = performanceInfo.execTime,
            .powerUsage = performanceInfo.powerUsage,
    };
}

GeneralResult<DataLocation> unvalidatedConvert(const aidl_hal::DataLocation& location) {
    VERIFY_NON_NEGATIVE(location.poolIndex) << "DataLocation: pool index must not be negative";
    VERIFY_NON_NEGATIVE(location.offset) << "DataLocation: offset must not be negative";
    VERIFY_NON_NEGATIVE(location.length) << "DataLocation: length must not be negative";
    if (location.offset > std::numeric_limits<uint32_t>::max()) {
        return NN_ERROR() << "DataLocation: offset must be <= std::numeric_limits<uint32_t>::max()";
    }
    if (location.length > std::numeric_limits<uint32_t>::max()) {
        return NN_ERROR() << "DataLocation: length must be <= std::numeric_limits<uint32_t>::max()";
    }
    return DataLocation{
            .poolIndex = static_cast<uint32_t>(location.poolIndex),
            .offset = static_cast<uint32_t>(location.offset),
            .length = static_cast<uint32_t>(location.length),
    };
}

GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation) {
    return Operation{
            .type = NN_TRY(unvalidatedConvert(operation.type)),
            .inputs = NN_TRY(toUnsigned(operation.inputs)),
            .outputs = NN_TRY(toUnsigned(operation.outputs)),
    };
}

GeneralResult<Operand::LifeTime> unvalidatedConvert(
        const aidl_hal::OperandLifeTime& operandLifeTime) {
    return static_cast<Operand::LifeTime>(operandLifeTime);
}

GeneralResult<Operand> unvalidatedConvert(const aidl_hal::Operand& operand) {
    return Operand{
            .type = NN_TRY(unvalidatedConvert(operand.type)),
            .dimensions = NN_TRY(toUnsigned(operand.dimensions)),
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
            .location = NN_TRY(unvalidatedConvert(operand.location)),
            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
    };
}

GeneralResult<Operand::ExtraParams> unvalidatedConvert(
        const std::optional<aidl_hal::OperandExtraParams>& optionalExtraParams) {
    if (!optionalExtraParams.has_value()) {
        return Operand::NoParams{};
    }
    const auto& extraParams = optionalExtraParams.value();
    using Tag = aidl_hal::OperandExtraParams::Tag;
    switch (extraParams.getTag()) {
        case Tag::channelQuant:
            return unvalidatedConvert(extraParams.get<Tag::channelQuant>());
        case Tag::extension:
            return extraParams.get<Tag::extension>();
    }
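    // The switch above is exhaustive for the tags known at this version; an unrecognized tag
    // (e.g. from a newer HAL) falls through to the error below.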
    return NN_ERROR() << "Unrecognized Operand::ExtraParams tag: "
                      << underlyingType(extraParams.getTag());
}

GeneralResult<Operand::SymmPerChannelQuantParams> unvalidatedConvert(
        const aidl_hal::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    VERIFY_NON_NEGATIVE(symmPerChannelQuantParams.channelDim)
            << "Per-channel quantization channel dimension must not be negative.";
    return Operand::SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
            .channelDim = static_cast<uint32_t>(symmPerChannelQuantParams.channelDim),
    };
}

GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model) {
    return Model{
            .main = NN_TRY(unvalidatedConvert(model.main)),
            .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
            .pools = NN_TRY(unvalidatedConvert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
    };
}

GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph) {
    return Model::Subgraph{
            .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
            .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
            .inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes)),
            .outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes)),
    };
}

GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
        const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
}

GeneralResult<Extension> unvalidatedConvert(const aidl_hal::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
    };
}

GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
        const aidl_hal::ExtensionOperandTypeInformation& operandTypeInformation) {
    VERIFY_NON_NEGATIVE(operandTypeInformation.byteSize)
            << "Extension operand type byte size must not be negative";
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
            .isTensor = operandTypeInformation.isTensor,
            .byteSize = static_cast<uint32_t>(operandTypeInformation.byteSize),
    };
}

GeneralResult<OutputShape> unvalidatedConvert(const aidl_hal::OutputShape& outputShape) {
    return OutputShape{
            .dimensions = NN_TRY(toUnsigned(outputShape.dimensions)),
            .isSufficient = outputShape.isSufficient,
    };
}

GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming) {
    return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
}

GeneralResult<Memory> unvalidatedConvert(const aidl_hal::Memory& memory) {
    VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
    return Memory{
            .handle = NN_TRY(unvalidatedConvert(memory.handle)),
            .size = static_cast<uint32_t>(memory.size),
            .name = memory.name,
    };
}

GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues) {
    return Model::OperandValues(operandValues.data(), operandValues.size());
}

GeneralResult<BufferDesc> unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc) {
    return BufferDesc{.dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions))};
}

GeneralResult<BufferRole> unvalidatedConvert(const aidl_hal::BufferRole& bufferRole) {
    VERIFY_NON_NEGATIVE(bufferRole.modelIndex) << "BufferRole: modelIndex must not be negative";
    VERIFY_NON_NEGATIVE(bufferRole.ioIndex) << "BufferRole: ioIndex must not be negative";
    return BufferRole{
            .modelIndex = static_cast<uint32_t>(bufferRole.modelIndex),
            .ioIndex = static_cast<uint32_t>(bufferRole.ioIndex),
            .frequency = bufferRole.frequency,
    };
}

GeneralResult<Request> unvalidatedConvert(const aidl_hal::Request& request) {
    return Request{
            .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
            .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
            .pools = NN_TRY(unvalidatedConvert(request.pools)),
    };
}

GeneralResult<Request::Argument> unvalidatedConvert(const aidl_hal::RequestArgument& argument) {
    const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
                                              : Request::Argument::LifeTime::POOL;
    return Request::Argument{
            .lifetime = lifetime,
            .location = NN_TRY(unvalidatedConvert(argument.location)),
            .dimensions = NN_TRY(toUnsigned(argument.dimensions)),
    };
}

GeneralResult<Request::MemoryPool> unvalidatedConvert(
        const aidl_hal::RequestMemoryPool& memoryPool) {
    using Tag = aidl_hal::RequestMemoryPool::Tag;
    switch (memoryPool.getTag()) {
        case Tag::pool:
            return unvalidatedConvert(memoryPool.get<Tag::pool>());
        case Tag::token: {
            const auto token = memoryPool.get<Tag::token>();
            VERIFY_NON_NEGATIVE(token) << "Memory pool token must not be negative";
            return static_cast<Request::MemoryDomainToken>(token);
        }
    }
    return NN_ERROR() << "Invalid Request::MemoryPool tag " << underlyingType(memoryPool.getTag());
}

GeneralResult<ErrorStatus> unvalidatedConvert(const aidl_hal::ErrorStatus& status) {
    switch (status) {
        case aidl_hal::ErrorStatus::NONE:
        case aidl_hal::ErrorStatus::DEVICE_UNAVAILABLE:
        case aidl_hal::ErrorStatus::GENERAL_FAILURE:
        case aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
        case aidl_hal::ErrorStatus::INVALID_ARGUMENT:
        case aidl_hal::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
        case aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
        case aidl_hal::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
        case aidl_hal::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return static_cast<ErrorStatus>(status);
    }
    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
}

GeneralResult<ExecutionPreference> unvalidatedConvert(
        const aidl_hal::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

GeneralResult<SharedHandle> unvalidatedConvert(
        const ::aidl::android::hardware::common::NativeHandle& aidlNativeHandle) {
    std::vector<base::unique_fd> fds;
    fds.reserve(aidlNativeHandle.fds.size());
    for (const auto& fd : aidlNativeHandle.fds) {
        int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
            return NN_ERROR() << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }
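
    // Ownership of the duplicated fds transfers to the returned Handle via base::unique_fd, so
    // the caller's NativeHandle remains valid and its fds are closed independently.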
    return std::make_shared<const Handle>(Handle{
            .fds = std::move(fds),
            .ints = aidlNativeHandle.ints,
    });
}

GeneralResult<ExecutionPreference> convert(
        const aidl_hal::ExecutionPreference& executionPreference) {
    return validatedConvert(executionPreference);
}

GeneralResult<Memory> convert(const aidl_hal::Memory& operand) {
    return validatedConvert(operand);
}

GeneralResult<Model> convert(const aidl_hal::Model& model) {
    return validatedConvert(model);
}

GeneralResult<Operand> convert(const aidl_hal::Operand& operand) {
    return unvalidatedConvert(operand);
}

GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType) {
    return unvalidatedConvert(operandType);
}

GeneralResult<Priority> convert(const aidl_hal::Priority& priority) {
    return validatedConvert(priority);
}

GeneralResult<Request::MemoryPool> convert(const aidl_hal::RequestMemoryPool& memoryPool) {
    return unvalidatedConvert(memoryPool);
}

GeneralResult<Request> convert(const aidl_hal::Request& request) {
    return validatedConvert(request);
}

GeneralResult<std::vector<Operation>> convert(const std::vector<aidl_hal::Operation>& operations) {
    return unvalidatedConvert(operations);
}

GeneralResult<std::vector<Memory>> convert(const std::vector<aidl_hal::Memory>& memories) {
    return validatedConvert(memories);
}
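
// Illustrative only (not part of the build): toUnsigned({3, 0, 7}) yields {3u, 0u, 7u}, while
// toUnsigned({-1}) produces an error result rather than wrapping around.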
GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec) {
    if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) {
        return NN_ERROR() << "Negative value passed to conversion from signed to unsigned";
    }
    return std::vector<uint32_t>(vec.begin(), vec.end());
}

}  // namespace android::nn

namespace aidl::android::hardware::neuralnetworks::utils {
namespace {

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
    }
    return halObject;
}
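
// Reverse direction of the validatedConvert in android::nn above: validate the canonical type
// first, then convert it to the AIDL representation.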
template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    const auto maybeVersion = nn::validate(canonical);
    if (!maybeVersion.has_value()) {
        return nn::error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > kVersion) {
        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
    }
    return utils::unvalidatedConvert(canonical);
}

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
    }
    return halObject;
}

}  // namespace

nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
    common::NativeHandle aidlNativeHandle;
    aidlNativeHandle.fds.reserve(sharedHandle->fds.size());
    for (const auto& fd : sharedHandle->fds) {
        int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
            return NN_ERROR() << "Failed to dup the fd";
        }
        aidlNativeHandle.fds.emplace_back(dupFd);
    }
    aidlNativeHandle.ints = sharedHandle->ints;
    return aidlNativeHandle;
}

nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory& memory) {
    if (memory.size > std::numeric_limits<int64_t>::max()) {
        return NN_ERROR() << "Memory size doesn't fit into int64_t.";
    }
    return Memory{
            .handle = NN_TRY(unvalidatedConvert(memory.handle)),
            .size = static_cast<int64_t>(memory.size),
            .name = memory.name,
    };
}

nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus) {
    switch (errorStatus) {
        case nn::ErrorStatus::NONE:
        case nn::ErrorStatus::DEVICE_UNAVAILABLE:
        case nn::ErrorStatus::GENERAL_FAILURE:
        case nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
        case nn::ErrorStatus::INVALID_ARGUMENT:
        case nn::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
        case nn::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
        case nn::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
        case nn::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return static_cast<ErrorStatus>(errorStatus);
        default:
            return ErrorStatus::GENERAL_FAILURE;
    }
}

nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape) {
    return OutputShape{.dimensions = NN_TRY(toSigned(outputShape.dimensions)),
                       .isSufficient = outputShape.isSufficient};
}

nn::GeneralResult<Memory> convert(const nn::Memory& memory) {
    return validatedConvert(memory);
}

nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
    return validatedConvert(errorStatus);
}

nn::GeneralResult<std::vector<OutputShape>> convert(
        const std::vector<nn::OutputShape>& outputShapes) {
    return validatedConvert(outputShapes);
}

nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
    if (!std::all_of(vec.begin(), vec.end(),
                     [](uint32_t v) { return v <= std::numeric_limits<int32_t>::max(); })) {
        return NN_ERROR() << "Vector contains a value that doesn't fit into int32_t.";
    }
    return std::vector<int32_t>(vec.begin(), vec.end());
}

}  // namespace aidl::android::hardware::neuralnetworks::utils