/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Conversions.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/CommonUtils.h>

#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>

namespace {

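// Returns the integer value underlying a scoped enum; used below to print
// unrecognized enum values in error messages.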
template <typename Type>
constexpr std::underlying_type_t<Type> underlyingType(Type value) {
    return static_cast<std::underlying_type_t<Type>>(value);
}

}  // namespace

namespace android::nn {
namespace {

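// Checks that an OperandType value is either one of the operand types known to
// the 1.2 HAL or lies in the extension range.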
constexpr bool validOperandType(OperandType operandType) {
    switch (operandType) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::BOOL:
        case OperandType::TENSOR_QUANT16_SYMM:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_BOOL8:
        case OperandType::FLOAT16:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::OEM:
        case OperandType::TENSOR_OEM_BYTE:
            return true;
        default:
            break;
    }
    return isExtension(operandType);
}

using hardware::hidl_handle;
using hardware::hidl_vec;

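// ConvertOutput is the canonical type produced by calling convert() on an
// Input. convertVec converts a hidl_vec element by element, propagating the
// first conversion failure through NN_TRY.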
template <typename Input>
using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
    std::vector<ConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::convert(argument)));
    }
    return canonical;
}

template <typename Type>
Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
    return convertVec(arguments);
}

}  // anonymous namespace

Result<OperandType> convert(const hal::V1_2::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

Result<OperationType> convert(const hal::V1_2::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

Result<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
    return static_cast<DeviceType>(deviceType);
}

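// Converts 1.2 Capabilities to the canonical form. Every entry in
// operandPerformance must name a valid 1.2 (or extension) operand type;
// otherwise the conversion fails rather than silently dropping entries.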
Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
    const bool validOperandTypes = std::all_of(
            capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
            [](const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
                const auto maybeType = convert(operandPerformance.type);
                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
            });
    if (!validOperandTypes) {
        return NN_ERROR()
               << "Invalid OperandType when converting OperandPerformance in Capabilities";
    }

    const auto relaxedFloat32toFloat16PerformanceScalar =
            NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
    const auto relaxedFloat32toFloat16PerformanceTensor =
            NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
    auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance));

    auto table =
            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
            .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
            .operandPerformance = std::move(table),
    };
}

Result<Capabilities::OperandPerformance> convert(
        const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(convert(operandPerformance.type)),
            .info = NN_TRY(convert(operandPerformance.info)),
    };
}

Result<Operation> convert(const hal::V1_2::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

Result<Operand::SymmPerChannelQuantParams> convert(
        const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    return Operand::SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
            .channelDim = symmPerChannelQuantParams.channelDim,
    };
}

Result<Operand> convert(const hal::V1_2::Operand& operand) {
    return Operand{
            .type = NN_TRY(convert(operand.type)),
            .dimensions = operand.dimensions,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = NN_TRY(convert(operand.lifetime)),
            .location = NN_TRY(convert(operand.location)),
            .extraParams = NN_TRY(convert(operand.extraParams)),
    };
}

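// Maps the HIDL safe-union discriminator onto the canonical variant:
// none -> NoParams, channelQuant -> SymmPerChannelQuantParams, and extension
// data is passed through unchanged.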
Result<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams) {
    using Discriminator = hal::V1_2::Operand::ExtraParams::hidl_discriminator;
    switch (extraParams.getDiscriminator()) {
        case Discriminator::none:
            return Operand::NoParams{};
        case Discriminator::channelQuant:
            return convert(extraParams.channelQuant());
        case Discriminator::extension:
            return extraParams.extension();
    }
    return NN_ERROR() << "Unrecognized Operand::ExtraParams discriminator: "
                      << underlyingType(extraParams.getDiscriminator());
}

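// Converts a 1.2 Model to the canonical form. The canonical Model does not
// store numberOfConsumers, so the incoming values are only validated against a
// recount before being discarded.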
Result<Model> convert(const hal::V1_2::Model& model) {
    auto operations = NN_TRY(convert(model.operations));

    // Verify number of consumers.
    const auto numberOfConsumers =
            hal::utils::countNumberOfConsumers(model.operands.size(), operations);
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
                              << numberOfConsumers[i] << " but found "
                              << model.operands[i].numberOfConsumers;
        }
    }

    auto main = Model::Subgraph{
            .operands = NN_TRY(convert(model.operands)),
            .operations = std::move(operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
    };

    return Model{
            .main = std::move(main),
            .operandValues = NN_TRY(convert(model.operandValues)),
            .pools = NN_TRY(convert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = NN_TRY(convert(model.extensionNameToPrefix)),
    };
}

Result<Model::ExtensionNameAndPrefix> convert(
        const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
}

Result<OutputShape> convert(const hal::V1_2::OutputShape& outputShape) {
    return OutputShape{
            .dimensions = outputShape.dimensions,
            .isSufficient = outputShape.isSufficient,
    };
}

Result<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {
    return static_cast<MeasureTiming>(measureTiming);
}

Result<Timing> convert(const hal::V1_2::Timing& timing) {
    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
}

Result<Extension> convert(const hal::V1_2::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(convert(extension.operandTypes)),
    };
}

Result<Extension::OperandTypeInformation> convert(
        const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation) {
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
            .isTensor = operandTypeInformation.isTensor,
            .byteSize = operandTypeInformation.byteSize,
    };
}

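// Clones the underlying native_handle_t so that the returned NativeHandle owns
// its file descriptors independently of the caller's hidl_handle.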
Result<NativeHandle> convert(const hidl_handle& handle) {
    auto* cloned = native_handle_clone(handle.getNativeHandle());
    return ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
}

Result<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
    return convertVec(extensions);
}

Result<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
    return convertVec(handles);
}

Result<std::vector<OutputShape>> convert(const hidl_vec<hal::V1_2::OutputShape>& outputShapes) {
    return convertVec(outputShapes);
}

}  // namespace android::nn

namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {

using utils::convert;

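// Canonical types that are unchanged between the 1.0 and 1.2 HALs are
// converted by forwarding to the 1.0 utility conversions.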
nn::Result<V1_0::OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
    return V1_0::utils::convert(lifetime);
}

nn::Result<V1_0::PerformanceInfo> convert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::convert(performanceInfo);
}

nn::Result<V1_0::DataLocation> convert(const nn::DataLocation& location) {
    return V1_0::utils::convert(location);
}

nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::convert(operandValues);
}

nn::Result<hidl_memory> convert(const nn::Memory& memory) {
    return V1_0::utils::convert(memory);
}

template <typename Input>
using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
    hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(convert(arguments[i]));
    }
    return halObject;
}

template <typename Type>
nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
    return convertVec(arguments);
}

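// The makeExtraParams overloads build the HIDL safe union from each
// alternative of the canonical Operand::ExtraParams variant; std::visit below
// selects the matching overload.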
nn::Result<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
    return Operand::ExtraParams{};
}

nn::Result<Operand::ExtraParams> makeExtraParams(
        const nn::Operand::SymmPerChannelQuantParams& channelQuant) {
    Operand::ExtraParams ret;
    ret.channelQuant(NN_TRY(convert(channelQuant)));
    return ret;
}

nn::Result<Operand::ExtraParams> makeExtraParams(const nn::Operand::ExtensionParams& extension) {
    Operand::ExtraParams ret;
    ret.extension(extension);
    return ret;
}

}  // anonymous namespace

nn::Result<OperandType> convert(const nn::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

nn::Result<OperationType> convert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

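// UNKNOWN has no 1.2 representation, so it is rejected; the remaining device
// types map one-to-one onto the HAL enum.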
nn::Result<DeviceType> convert(const nn::DeviceType& deviceType) {
    switch (deviceType) {
        case nn::DeviceType::UNKNOWN:
            return NN_ERROR() << "Invalid DeviceType UNKNOWN";
        case nn::DeviceType::OTHER:
        case nn::DeviceType::CPU:
        case nn::DeviceType::GPU:
        case nn::DeviceType::ACCELERATOR:
            return static_cast<DeviceType>(deviceType);
    }
    return NN_ERROR() << "Invalid DeviceType " << underlyingType(deviceType);
}

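// Converts canonical Capabilities to 1.2. Operand types that the 1.2 HAL
// cannot represent are filtered out of operandPerformance instead of failing
// the whole conversion.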
nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
    std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
    operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
    std::copy_if(capabilities.operandPerformance.asVector().begin(),
                 capabilities.operandPerformance.asVector().end(),
                 std::back_inserter(operandPerformance),
                 [](const nn::Capabilities::OperandPerformance& operandPerformance) {
                     return nn::validOperandType(operandPerformance.type);
                 });

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar =
                    NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
            .relaxedFloat32toFloat16PerformanceTensor =
                    NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
            .operandPerformance = NN_TRY(convert(operandPerformance)),
    };
}

nn::Result<Capabilities::OperandPerformance> convert(
        const nn::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(convert(operandPerformance.type)),
            .info = NN_TRY(convert(operandPerformance.info)),
    };
}

nn::Result<Operation> convert(const nn::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

nn::Result<SymmPerChannelQuantParams> convert(
        const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    return SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
            .channelDim = symmPerChannelQuantParams.channelDim,
    };
}

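// numberOfConsumers is not tracked by the canonical Operand, so it is set to 0
// here and filled in later by convert(const nn::Model&) once all operations
// are known.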
nn::Result<Operand> convert(const nn::Operand& operand) {
    return Operand{
            .type = NN_TRY(convert(operand.type)),
            .dimensions = operand.dimensions,
            .numberOfConsumers = 0,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = NN_TRY(convert(operand.lifetime)),
            .location = NN_TRY(convert(operand.location)),
            .extraParams = NN_TRY(convert(operand.extraParams)),
    };
}

nn::Result<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
    return std::visit([](const auto& x) { return makeExtraParams(x); }, extraParams);
}

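// Converts a canonical Model to 1.2. Pointer-based model data is rejected up
// front, and the recomputed numberOfConsumers values are written back into the
// converted operands.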
nn::Result<Model> convert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(convert(model.main.operands));

    // Update number of consumers.
    const auto numberOfConsumers =
            hal::utils::countNumberOfConsumers(operands.size(), model.main.operations);
    CHECK(operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < operands.size(); ++i) {
        operands[i].numberOfConsumers = numberOfConsumers[i];
    }

    return Model{
            .operands = std::move(operands),
            .operations = NN_TRY(convert(model.main.operations)),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = NN_TRY(convert(model.operandValues)),
            .pools = NN_TRY(convert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = NN_TRY(convert(model.extensionNameToPrefix)),
    };
}

nn::Result<Model::ExtensionNameAndPrefix> convert(
        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
}

nn::Result<OutputShape> convert(const nn::OutputShape& outputShape) {
    return OutputShape{.dimensions = outputShape.dimensions,
                       .isSufficient = outputShape.isSufficient};
}

nn::Result<MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
    return static_cast<MeasureTiming>(measureTiming);
}

nn::Result<Timing> convert(const nn::Timing& timing) {
    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
}

nn::Result<Extension> convert(const nn::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(convert(extension.operandTypes)),
    };
}

nn::Result<Extension::OperandTypeInformation> convert(
        const nn::Extension::OperandTypeInformation& operandTypeInformation) {
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
            .isTensor = operandTypeInformation.isTensor,
            .byteSize = operandTypeInformation.byteSize,
    };
}

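// Constructing a hidl_handle from a raw native_handle_t* does not take
// ownership, so the subsequent copy (which clones the native handle) keeps the
// returned value independent of the caller's NativeHandle.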
nn::Result<hidl_handle> convert(const nn::NativeHandle& handle) {
    const auto hidlHandle = hidl_handle(handle->handle());
    // Copy memory to force the native_handle_t to be copied.
    auto copiedHandle = hidlHandle;
    return copiedHandle;
}

nn::Result<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
    return convertVec(extensions);
}

nn::Result<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
    return convertVec(handles);
}

nn::Result<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes) {
    return convertVec(outputShapes);
}

}  // namespace android::hardware::neuralnetworks::V1_2::utils