blob: db3504bb7455f2b148659ad5bc85aa8d291e027c [file] [log] [blame]
Lev Proleev6b6dfcd2020-11-11 18:28:50 +00001/*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "Conversions.h"
18
19#include <aidl/android/hardware/common/NativeHandle.h>
20#include <android-base/logging.h>
Michael Butlerab2f4822021-02-08 00:05:07 -080021#include <android/hardware_buffer.h>
22#include <cutils/native_handle.h>
Lev Proleev6b6dfcd2020-11-11 18:28:50 +000023#include <nnapi/OperandTypes.h>
24#include <nnapi/OperationTypes.h>
25#include <nnapi/Result.h>
26#include <nnapi/SharedMemory.h>
27#include <nnapi/TypeUtils.h>
28#include <nnapi/Types.h>
29#include <nnapi/Validation.h>
30#include <nnapi/hal/CommonUtils.h>
31#include <nnapi/hal/HandleError.h>
Michael Butlerab2f4822021-02-08 00:05:07 -080032#include <vndk/hardware_buffer.h>
Lev Proleev6b6dfcd2020-11-11 18:28:50 +000033
34#include <algorithm>
35#include <chrono>
36#include <functional>
37#include <iterator>
38#include <limits>
39#include <type_traits>
40#include <utility>
41
// Returns NN_ERROR() from the enclosing function when `value` is negative.
// Implemented as a `while` (which executes at most once, since it returns)
// so call sites can stream an explanatory message onto the error:
//     VERIFY_NON_NEGATIVE(x) << "x must not be negative";
#define VERIFY_NON_NEGATIVE(value) \
    while (UNLIKELY(value < 0)) return NN_ERROR()
44
namespace {

// Casts a scoped-enum value to its underlying integral type (used for
// logging/streaming enum values whose numeric form is wanted).
template <typename Type>
constexpr std::underlying_type_t<Type> underlyingType(Type value) {
    return static_cast<std::underlying_type_t<Type>>(value);
}

// Maximum canonical version this conversion code supports; validatedConvert
// rejects objects whose validation reports a newer required version.
constexpr auto kVersion = android::nn::Version::ANDROID_S;

}  // namespace
55
56namespace android::nn {
57namespace {
58
Michael Butlerfadeb8a2021-02-07 00:11:13 -080059using ::aidl::android::hardware::common::NativeHandle;
60
// Returns true if the canonical operand type can be represented through this
// AIDL interface. OEM types are deliberately rejected; values outside the
// enumerated set are accepted only if they fall in the extension range.
constexpr auto validOperandType(nn::OperandType operandType) {
    switch (operandType) {
        case nn::OperandType::FLOAT32:
        case nn::OperandType::INT32:
        case nn::OperandType::UINT32:
        case nn::OperandType::TENSOR_FLOAT32:
        case nn::OperandType::TENSOR_INT32:
        case nn::OperandType::TENSOR_QUANT8_ASYMM:
        case nn::OperandType::BOOL:
        case nn::OperandType::TENSOR_QUANT16_SYMM:
        case nn::OperandType::TENSOR_FLOAT16:
        case nn::OperandType::TENSOR_BOOL8:
        case nn::OperandType::FLOAT16:
        case nn::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case nn::OperandType::TENSOR_QUANT16_ASYMM:
        case nn::OperandType::TENSOR_QUANT8_SYMM:
        case nn::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case nn::OperandType::SUBGRAPH:
            return true;
        case nn::OperandType::OEM:
        case nn::OperandType::TENSOR_OEM_BYTE:
            return false;
    }
    // Not one of the enumerated values: valid only if it is an extension type.
    return nn::isExtension(operandType);
}
86
// The canonical type produced by calling unvalidatedConvert on an Input.
template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

// Element-wise unvalidatedConvert over a vector; NN_TRY propagates the first
// conversion failure.
template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
    }
    return canonical;
}

// Overload so generic code can call unvalidatedConvert uniformly on vectors.
template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    return unvalidatedConvertVec(arguments);
}
107
// Converts a HAL object to canonical form, then checks that the result is
// valid and does not require a canonical version newer than kVersion.
template <typename Type>
GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
    const auto maybeVersion = validate(canonical);
    if (!maybeVersion.has_value()) {
        return error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > kVersion) {
        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
    }
    return canonical;
}
121
// Element-wise validatedConvert over a vector; fails on the first element that
// does not convert or validate.
template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(validatedConvert(argument)));
    }
    return canonical;
}
132
// Deep-copies an AIDL NativeHandle into a canonical Handle by dup()ing every
// file descriptor. If a dup fails mid-way, the unique_fds already collected
// close themselves when `fds` is destroyed, so nothing leaks.
GeneralResult<Handle> unvalidatedConvertHelper(const NativeHandle& aidlNativeHandle) {
    std::vector<base::unique_fd> fds;
    fds.reserve(aidlNativeHandle.fds.size());
    for (const auto& fd : aidlNativeHandle.fds) {
        const int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
            return NN_ERROR() << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    // The ints carry no ownership and are copied directly.
    return Handle{.fds = std::move(fds), .ints = aidlNativeHandle.ints};
}
148
// Deleter for handles produced by native_handle_create(): closes the contained
// fds (native_handle_close) and then frees the handle memory
// (native_handle_delete). Null handles are ignored.
struct NativeHandleDeleter {
    void operator()(native_handle_t* handle) const {
        if (handle) {
            native_handle_close(handle);
            native_handle_delete(handle);
        }
    }
};

// RAII owner for a native_handle_t and the fds stored inside it.
using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;
159
// Creates a native_handle_t containing dup()ed copies of the AIDL handle's fds
// followed by a copy of its ints. Ownership of the duped fds is transferred
// into the returned handle (released from the temporary unique_fds), so the
// UniqueNativeHandle deleter will close them.
static nn::GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(
        const NativeHandle& handle) {
    std::vector<base::unique_fd> fds;
    fds.reserve(handle.fds.size());
    for (const auto& fd : handle.fds) {
        const int dupFd = dup(fd.get());
        if (dupFd == -1) {
            return NN_ERROR() << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    // native_handle_create takes `int` counts, so guard the narrowing casts.
    constexpr size_t kIntMax = std::numeric_limits<int>::max();
    CHECK_LE(handle.fds.size(), kIntMax);
    CHECK_LE(handle.ints.size(), kIntMax);
    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
                                                         static_cast<int>(handle.ints.size()));
    if (nativeHandle == nullptr) {
        return NN_ERROR() << "Failed to create native_handle";
    }
    // Transfer fd ownership into data[0..numFds), then append the ints after
    // the fds (native_handle layout stores ints immediately after fds).
    for (size_t i = 0; i < fds.size(); ++i) {
        nativeHandle->data[i] = fds[i].release();
    }
    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);

    return UniqueNativeHandle(nativeHandle);
}
187
Lev Proleev6b6dfcd2020-11-11 18:28:50 +0000188} // anonymous namespace
189
// OperandType/OperationType share numeric values with the canonical enums;
// negative values are rejected (extension/enum values are non-negative).
GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
    VERIFY_NON_NEGATIVE(underlyingType(operandType)) << "Negative operand types are not allowed.";
    return static_cast<OperandType>(operandType);
}

GeneralResult<OperationType> unvalidatedConvert(const aidl_hal::OperationType& operationType) {
    VERIFY_NON_NEGATIVE(underlyingType(operationType))
            << "Negative operation types are not allowed.";
    return static_cast<OperationType>(operationType);
}

// DeviceType and Priority are direct value-for-value casts.
GeneralResult<DeviceType> unvalidatedConvert(const aidl_hal::DeviceType& deviceType) {
    return static_cast<DeviceType>(deviceType);
}

GeneralResult<Priority> unvalidatedConvert(const aidl_hal::Priority& priority) {
    return static_cast<Priority>(priority);
}
208
// Converts Capabilities, first rejecting any OperandPerformance entry whose
// operand type is invalid or OEM, since such entries cannot appear in the
// canonical OperandPerformanceTable.
GeneralResult<Capabilities> unvalidatedConvert(const aidl_hal::Capabilities& capabilities) {
    const bool validOperandTypes = std::all_of(
            capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
            [](const aidl_hal::OperandPerformance& operandPerformance) {
                const auto maybeType = unvalidatedConvert(operandPerformance.type);
                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
            });
    if (!validOperandTypes) {
        return NN_ERROR() << "Invalid OperandType when unvalidatedConverting OperandPerformance in "
                             "Capabilities";
    }

    // Table creation can itself fail (e.g. on duplicates); surface that as a
    // general failure.
    auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
    auto table = NN_TRY(hal::utils::makeGeneralFailure(
            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
            nn::ErrorStatus::GENERAL_FAILURE));

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
            .operandPerformance = std::move(table),
            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
    };
}
236
// Field-by-field conversions; NN_TRY propagates the first failure.
GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
        const aidl_hal::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
    };
}

// The float fields are copied verbatim.
GeneralResult<Capabilities::PerformanceInfo> unvalidatedConvert(
        const aidl_hal::PerformanceInfo& performanceInfo) {
    return Capabilities::PerformanceInfo{
            .execTime = performanceInfo.execTime,
            .powerUsage = performanceInfo.powerUsage,
    };
}
252
// The AIDL DataLocation fields are signed; the canonical form uses uint32_t,
// so reject negative values and values that do not fit in 32 bits.
GeneralResult<DataLocation> unvalidatedConvert(const aidl_hal::DataLocation& location) {
    VERIFY_NON_NEGATIVE(location.poolIndex) << "DataLocation: pool index must not be negative";
    VERIFY_NON_NEGATIVE(location.offset) << "DataLocation: offset must not be negative";
    VERIFY_NON_NEGATIVE(location.length) << "DataLocation: length must not be negative";
    if (location.offset > std::numeric_limits<uint32_t>::max()) {
        return NN_ERROR() << "DataLocation: offset must be <= std::numeric_limits<uint32_t>::max()";
    }
    if (location.length > std::numeric_limits<uint32_t>::max()) {
        return NN_ERROR() << "DataLocation: length must be <= std::numeric_limits<uint32_t>::max()";
    }
    return DataLocation{
            .poolIndex = static_cast<uint32_t>(location.poolIndex),
            .offset = static_cast<uint32_t>(location.offset),
            .length = static_cast<uint32_t>(location.length),
    };
}
269
// Operation indexes are signed in AIDL; toUnsigned rejects negatives.
GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation) {
    return Operation{
            .type = NN_TRY(unvalidatedConvert(operation.type)),
            .inputs = NN_TRY(toUnsigned(operation.inputs)),
            .outputs = NN_TRY(toUnsigned(operation.outputs)),
    };
}

// OperandLifeTime values map one-to-one onto Operand::LifeTime.
GeneralResult<Operand::LifeTime> unvalidatedConvert(
        const aidl_hal::OperandLifeTime& operandLifeTime) {
    return static_cast<Operand::LifeTime>(operandLifeTime);
}

GeneralResult<Operand> unvalidatedConvert(const aidl_hal::Operand& operand) {
    return Operand{
            .type = NN_TRY(unvalidatedConvert(operand.type)),
            .dimensions = NN_TRY(toUnsigned(operand.dimensions)),
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
            .location = NN_TRY(unvalidatedConvert(operand.location)),
            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
    };
}
294
// A missing OperandExtraParams maps to Operand::NoParams; otherwise dispatch
// on the AIDL union tag. The trailing error is reachable only for tag values
// outside the known set.
GeneralResult<Operand::ExtraParams> unvalidatedConvert(
        const std::optional<aidl_hal::OperandExtraParams>& optionalExtraParams) {
    if (!optionalExtraParams.has_value()) {
        return Operand::NoParams{};
    }
    const auto& extraParams = optionalExtraParams.value();
    using Tag = aidl_hal::OperandExtraParams::Tag;
    switch (extraParams.getTag()) {
        case Tag::channelQuant:
            return unvalidatedConvert(extraParams.get<Tag::channelQuant>());
        case Tag::extension:
            return extraParams.get<Tag::extension>();
    }
    return NN_ERROR() << "Unrecognized Operand::ExtraParams tag: "
                      << underlyingType(extraParams.getTag());
}
311
// channelDim is signed in AIDL but unsigned in the canonical type.
GeneralResult<Operand::SymmPerChannelQuantParams> unvalidatedConvert(
        const aidl_hal::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    VERIFY_NON_NEGATIVE(symmPerChannelQuantParams.channelDim)
            << "Per-channel quantization channel dimension must not be negative.";
    return Operand::SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
            .channelDim = static_cast<uint32_t>(symmPerChannelQuantParams.channelDim),
    };
}
321
// Field-by-field structural conversions; NN_TRY propagates the first failure
// from any nested conversion.
GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model) {
    return Model{
            .main = NN_TRY(unvalidatedConvert(model.main)),
            .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
            .pools = NN_TRY(unvalidatedConvert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
    };
}

GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph) {
    return Model::Subgraph{
            .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
            .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
            .inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes)),
            .outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes)),
    };
}

GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
        const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
}

GeneralResult<Extension> unvalidatedConvert(const aidl_hal::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
    };
}
356
// byteSize is signed in AIDL but unsigned in the canonical type.
GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
        const aidl_hal::ExtensionOperandTypeInformation& operandTypeInformation) {
    VERIFY_NON_NEGATIVE(operandTypeInformation.byteSize)
            << "Extension operand type byte size must not be negative";
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
            .isTensor = operandTypeInformation.isTensor,
            .byteSize = static_cast<uint32_t>(operandTypeInformation.byteSize),
    };
}
367
// Output dimensions are signed in AIDL; toUnsigned rejects negatives.
GeneralResult<OutputShape> unvalidatedConvert(const aidl_hal::OutputShape& outputShape) {
    return OutputShape{
            .dimensions = NN_TRY(toUnsigned(outputShape.dimensions)),
            .isSufficient = outputShape.isSufficient,
    };
}

// The AIDL interface passes MeasureTiming as a plain bool.
GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming) {
    return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
}
378
// Rounds `value` up to the nearest multiple of `multiple` (must be nonzero).
// The remainder-based form avoids the unsigned overflow of the textbook
// (value + multiple - 1) / multiple * multiple expression: e.g. for
// value == UINT32_MAX (already a multiple of 5) the old form wrapped around
// and returned 0 instead of UINT32_MAX. Overflow now occurs only when the
// correctly-rounded result itself is not representable in uint32_t.
static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
    const uint32_t remainder = value % multiple;
    return remainder == 0 ? value : value + (multiple - remainder);
}
382
Michael Butlerfadeb8a2021-02-07 00:11:13 -0800383GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
Lev Proleev6b6dfcd2020-11-11 18:28:50 +0000384 VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
Michael Butlerab2f4822021-02-08 00:05:07 -0800385 if (memory.size > std::numeric_limits<uint32_t>::max()) {
386 return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
387 }
388
389 if (memory.name != "hardware_buffer_blob") {
390 return std::make_shared<const Memory>(Memory{
391 .handle = NN_TRY(unvalidatedConvertHelper(memory.handle)),
392 .size = static_cast<uint32_t>(memory.size),
393 .name = memory.name,
394 });
395 }
396
397 const auto size = static_cast<uint32_t>(memory.size);
398 const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
399 const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
400 const uint32_t width = size;
401 const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer.
402 const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer.
403
404 const UniqueNativeHandle handle = NN_TRY(nativeHandleFromAidlHandle(memory.handle));
405 const native_handle_t* nativeHandle = handle.get();
406
407 // AHardwareBuffer_createFromHandle() might fail because an allocator
408 // expects a specific stride value. In that case, we try to guess it by
409 // aligning the width to small powers of 2.
410 // TODO(b/174120849): Avoid stride assumptions.
411 AHardwareBuffer* hardwareBuffer = nullptr;
412 status_t status = UNKNOWN_ERROR;
413 for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
414 const uint32_t stride = roundUpToMultiple(width, alignment);
415 AHardwareBuffer_Desc desc{
416 .width = width,
417 .height = height,
418 .layers = layers,
419 .format = format,
420 .usage = usage,
421 .stride = stride,
422 };
423 status = AHardwareBuffer_createFromHandle(&desc, nativeHandle,
424 AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
425 &hardwareBuffer);
426 if (status == NO_ERROR) {
427 break;
428 }
429 }
430 if (status != NO_ERROR) {
431 return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
432 << "Can't create AHardwareBuffer from handle. Error: " << status;
433 }
434
Michael Butlerfadeb8a2021-02-07 00:11:13 -0800435 return std::make_shared<const Memory>(Memory{
Michael Butlerab2f4822021-02-08 00:05:07 -0800436 .handle = HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
Lev Proleev6b6dfcd2020-11-11 18:28:50 +0000437 .size = static_cast<uint32_t>(memory.size),
438 .name = memory.name,
Michael Butlerfadeb8a2021-02-07 00:11:13 -0800439 });
Lev Proleev6b6dfcd2020-11-11 18:28:50 +0000440}
441
// Model::OperandValues copies the raw constant-operand bytes.
GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues) {
    return Model::OperandValues(operandValues.data(), operandValues.size());
}

GeneralResult<BufferDesc> unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc) {
    return BufferDesc{.dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions))};
}

// BufferRole indices are signed in AIDL but unsigned in the canonical type.
GeneralResult<BufferRole> unvalidatedConvert(const aidl_hal::BufferRole& bufferRole) {
    VERIFY_NON_NEGATIVE(bufferRole.modelIndex) << "BufferRole: modelIndex must not be negative";
    VERIFY_NON_NEGATIVE(bufferRole.ioIndex) << "BufferRole: ioIndex must not be negative";
    return BufferRole{
            .modelIndex = static_cast<uint32_t>(bufferRole.modelIndex),
            .ioIndex = static_cast<uint32_t>(bufferRole.ioIndex),
            .frequency = bufferRole.frequency,
    };
}
459
GeneralResult<Request> unvalidatedConvert(const aidl_hal::Request& request) {
    return Request{
            .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
            .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
            .pools = NN_TRY(unvalidatedConvert(request.pools)),
    };
}

// AIDL encodes "no value" with a boolean rather than a lifetime enum; an
// argument either has no value or lives in one of the request's pools.
GeneralResult<Request::Argument> unvalidatedConvert(const aidl_hal::RequestArgument& argument) {
    const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
                                              : Request::Argument::LifeTime::POOL;
    return Request::Argument{
            .lifetime = lifetime,
            .location = NN_TRY(unvalidatedConvert(argument.location)),
            .dimensions = NN_TRY(toUnsigned(argument.dimensions)),
    };
}

// Dispatches on the union tag: either a full Memory or a memory-domain token.
// The trailing error is reachable only for tags outside the known set.
GeneralResult<Request::MemoryPool> unvalidatedConvert(
        const aidl_hal::RequestMemoryPool& memoryPool) {
    using Tag = aidl_hal::RequestMemoryPool::Tag;
    switch (memoryPool.getTag()) {
        case Tag::pool:
            return unvalidatedConvert(memoryPool.get<Tag::pool>());
        case Tag::token: {
            const auto token = memoryPool.get<Tag::token>();
            VERIFY_NON_NEGATIVE(token) << "Memory pool token must not be negative";
            return static_cast<Request::MemoryDomainToken>(token);
        }
    }
    return NN_ERROR() << "Invalid Request::MemoryPool tag " << underlyingType(memoryPool.getTag());
}
492
// Only the statuses listed here are accepted; any other incoming value is an
// error rather than being forwarded as an out-of-range enum.
GeneralResult<ErrorStatus> unvalidatedConvert(const aidl_hal::ErrorStatus& status) {
    switch (status) {
        case aidl_hal::ErrorStatus::NONE:
        case aidl_hal::ErrorStatus::DEVICE_UNAVAILABLE:
        case aidl_hal::ErrorStatus::GENERAL_FAILURE:
        case aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
        case aidl_hal::ErrorStatus::INVALID_ARGUMENT:
        case aidl_hal::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
        case aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
        case aidl_hal::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
        case aidl_hal::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return static_cast<ErrorStatus>(status);
    }
    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
}

GeneralResult<ExecutionPreference> unvalidatedConvert(
        const aidl_hal::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

// Wraps the deep-copied handle in a shared_ptr for the canonical SharedHandle.
GeneralResult<SharedHandle> unvalidatedConvert(const NativeHandle& aidlNativeHandle) {
    return std::make_shared<const Handle>(NN_TRY(unvalidatedConvertHelper(aidlNativeHandle)));
}
517
// Public convert() entry points. Most forward to validatedConvert (conversion
// plus validity/version checking); the rest forward to unvalidatedConvert
// where no validation is performed for that type.
GeneralResult<ExecutionPreference> convert(
        const aidl_hal::ExecutionPreference& executionPreference) {
    return validatedConvert(executionPreference);
}

GeneralResult<SharedMemory> convert(const aidl_hal::Memory& operand) {
    return validatedConvert(operand);
}

GeneralResult<Model> convert(const aidl_hal::Model& model) {
    return validatedConvert(model);
}

GeneralResult<Operand> convert(const aidl_hal::Operand& operand) {
    return unvalidatedConvert(operand);
}

GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType) {
    return unvalidatedConvert(operandType);
}

GeneralResult<Priority> convert(const aidl_hal::Priority& priority) {
    return validatedConvert(priority);
}

GeneralResult<Request::MemoryPool> convert(const aidl_hal::RequestMemoryPool& memoryPool) {
    return unvalidatedConvert(memoryPool);
}

GeneralResult<Request> convert(const aidl_hal::Request& request) {
    return validatedConvert(request);
}

GeneralResult<std::vector<Operation>> convert(const std::vector<aidl_hal::Operation>& operations) {
    return unvalidatedConvert(operations);
}

GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories) {
    return validatedConvert(memories);
}
558
559GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec) {
560 if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) {
561 return NN_ERROR() << "Negative value passed to conversion from signed to unsigned";
562 }
563 return std::vector<uint32_t>(vec.begin(), vec.end());
564}
565
566} // namespace android::nn
567
568namespace aidl::android::hardware::neuralnetworks::utils {
569namespace {
570
// The AIDL type produced by calling utils::unvalidatedConvert on an Input.
template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

// Element-wise unvalidatedConvert over a vector; fails on the first element
// that does not convert.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
    }
    return halObject;
}

// Validates the canonical object (including its required version against
// kVersion) before converting it to the AIDL representation.
template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    const auto maybeVersion = nn::validate(canonical);
    if (!maybeVersion.has_value()) {
        return nn::error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > kVersion) {
        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
    }
    return utils::unvalidatedConvert(canonical);
}

// Element-wise validatedConvert over a vector; fails on the first element that
// does not validate or convert.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
    }
    return halObject;
}
607
// Deep-copies a canonical Handle into an AIDL NativeHandle by dup()ing each
// fd; the AIDL fd wrapper takes ownership of the duplicate.
nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::Handle& handle) {
    common::NativeHandle aidlNativeHandle;
    aidlNativeHandle.fds.reserve(handle.fds.size());
    for (const auto& fd : handle.fds) {
        const int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
            return NN_ERROR() << "Failed to dup the fd";
        }
        aidlNativeHandle.fds.emplace_back(dupFd);
    }
    aidlNativeHandle.ints = handle.ints;
    return aidlNativeHandle;
}
623
// Builds an AIDL NativeHandle from a raw native_handle_t, dup()ing every fd so
// the original handle keeps ownership of its descriptors.
static nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
        const native_handle_t& handle) {
    common::NativeHandle aidlNativeHandle;

    aidlNativeHandle.fds.reserve(handle.numFds);
    for (int i = 0; i < handle.numFds; ++i) {
        const int dupFd = dup(handle.data[i]);
        if (dupFd == -1) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
        }
        aidlNativeHandle.fds.emplace_back(dupFd);
    }

    // In native_handle_t, the ints are stored immediately after the fds.
    aidlNativeHandle.ints = std::vector<int>(&handle.data[handle.numFds],
                                             &handle.data[handle.numFds + handle.numInts]);

    return aidlNativeHandle;
}
642
643} // namespace
644
// A SharedHandle must be non-null; converts the pointee.
nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
    CHECK(sharedHandle != nullptr);
    return unvalidatedConvert(*sharedHandle);
}
649
// Converts a canonical SharedMemory to its AIDL representation. Plain handles
// are deep-copied; AHardwareBuffer-backed memories are flattened to the
// buffer's underlying native handle.
nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory) {
    CHECK(memory != nullptr);
    // The AIDL size field is int64_t.
    if (memory->size > std::numeric_limits<int64_t>::max()) {
        return NN_ERROR() << "Memory size doesn't fit into int64_t.";
    }
    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
        return Memory{
                .handle = NN_TRY(unvalidatedConvert(*handle)),
                .size = static_cast<int64_t>(memory->size),
                .name = memory->name,
        };
    }

    // Not a plain Handle, so the variant must hold a HardwareBufferHandle.
    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
    AHardwareBuffer_Desc bufferDesc;
    AHardwareBuffer_describe(ahwb, &bufferDesc);

    // Enforce the naming/size invariants used by the reverse (AIDL->canonical)
    // conversion: BLOB buffers record their size as the width.
    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
        CHECK_EQ(memory->size, bufferDesc.width);
        CHECK_EQ(memory->name, "hardware_buffer_blob");
    } else {
        CHECK_EQ(memory->size, 0u);
        CHECK_EQ(memory->name, "hardware_buffer");
    }

    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
    if (nativeHandle == nullptr) {
        return NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
                             "returned nullptr";
    }

    return Memory{
            .handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle)),
            .size = static_cast<int64_t>(memory->size),
            .name = memory->name,
    };
}
687
// Known canonical statuses map one-to-one onto the AIDL enum; any other value
// degrades to GENERAL_FAILURE instead of emitting an out-of-range AIDL value.
nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus) {
    switch (errorStatus) {
        case nn::ErrorStatus::NONE:
        case nn::ErrorStatus::DEVICE_UNAVAILABLE:
        case nn::ErrorStatus::GENERAL_FAILURE:
        case nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
        case nn::ErrorStatus::INVALID_ARGUMENT:
        case nn::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
        case nn::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
        case nn::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
        case nn::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return static_cast<ErrorStatus>(errorStatus);
        default:
            return ErrorStatus::GENERAL_FAILURE;
    }
}

// AIDL dimensions are signed, so the unsigned canonical dimensions must fit.
nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape) {
    return OutputShape{.dimensions = NN_TRY(toSigned(outputShape.dimensions)),
                       .isSufficient = outputShape.isSufficient};
}
709
// Public convert() entry points: validate the canonical object (including its
// required version) before producing the AIDL representation.
nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory) {
    return validatedConvert(memory);
}

nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
    return validatedConvert(errorStatus);
}

nn::GeneralResult<std::vector<OutputShape>> convert(
        const std::vector<nn::OutputShape>& outputShapes) {
    return validatedConvert(outputShapes);
}
722
723nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
724 if (!std::all_of(vec.begin(), vec.end(),
725 [](uint32_t v) { return v <= std::numeric_limits<int32_t>::max(); })) {
726 return NN_ERROR() << "Vector contains a value that doesn't fit into int32_t.";
727 }
728 return std::vector<int32_t>(vec.begin(), vec.end());
729}
730
731} // namespace aidl::android::hardware::neuralnetworks::utils