/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "CommonUtils.h"
18
Michael Butler4b276a72020-08-06 23:22:35 -070019#include "HandleError.h"
20
Michael Butlerb98aa6d2020-02-22 22:37:59 -080021#include <android-base/logging.h>
Slava Shklyaev49817a02020-10-27 18:44:01 +000022#include <android-base/unique_fd.h>
Michael Butlerab2f4822021-02-08 00:05:07 -080023#include <android/hardware_buffer.h>
24#include <hidl/HidlSupport.h>
Michael Butlerb98aa6d2020-02-22 22:37:59 -080025#include <nnapi/Result.h>
26#include <nnapi/SharedMemory.h>
27#include <nnapi/TypeUtils.h>
28#include <nnapi/Types.h>
29#include <nnapi/Validation.h>
Michael Butlerab2f4822021-02-08 00:05:07 -080030#include <vndk/hardware_buffer.h>
Michael Butlerb98aa6d2020-02-22 22:37:59 -080031
32#include <algorithm>
33#include <any>
Michael Butler4b276a72020-08-06 23:22:35 -070034#include <functional>
Michael Butlerb98aa6d2020-02-22 22:37:59 -080035#include <optional>
36#include <variant>
37#include <vector>
38
39namespace android::hardware::neuralnetworks::utils {
40namespace {
41
42bool hasNoPointerData(const nn::Operand& operand);
43bool hasNoPointerData(const nn::Model::Subgraph& subgraph);
44bool hasNoPointerData(const nn::Request::Argument& argument);
45
46template <typename Type>
47bool hasNoPointerData(const std::vector<Type>& objects) {
48 return std::all_of(objects.begin(), objects.end(),
49 [](const auto& object) { return hasNoPointerData(object); });
50}
51
52bool hasNoPointerData(const nn::DataLocation& location) {
53 return std::visit([](auto ptr) { return ptr == nullptr; }, location.pointer);
54}
55
56bool hasNoPointerData(const nn::Operand& operand) {
57 return hasNoPointerData(operand.location);
58}
59
60bool hasNoPointerData(const nn::Model::Subgraph& subgraph) {
61 return hasNoPointerData(subgraph.operands);
62}
63
64bool hasNoPointerData(const nn::Request::Argument& argument) {
65 return hasNoPointerData(argument.location);
66}
67
68void copyPointersToSharedMemory(nn::Operand* operand, nn::ConstantMemoryBuilder* memoryBuilder) {
69 CHECK(operand != nullptr);
70 CHECK(memoryBuilder != nullptr);
71
72 if (operand->lifetime != nn::Operand::LifeTime::POINTER) {
73 return;
74 }
75
76 const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
77 operand->location.pointer);
78 CHECK(data != nullptr);
79 operand->lifetime = nn::Operand::LifeTime::CONSTANT_REFERENCE;
80 operand->location = memoryBuilder->append(data, operand->location.length);
81}
82
83void copyPointersToSharedMemory(nn::Model::Subgraph* subgraph,
84 nn::ConstantMemoryBuilder* memoryBuilder) {
85 CHECK(subgraph != nullptr);
86 std::for_each(subgraph->operands.begin(), subgraph->operands.end(),
87 [memoryBuilder](auto& operand) {
88 copyPointersToSharedMemory(&operand, memoryBuilder);
89 });
90}
91
// Packages one owned fd plus a list of ints into a hidl_handle.
// Layout of the created native_handle_t: data[0] holds the fd, data[1..] the ints.
// On success the returned hidl_handle owns the native_handle_t (shouldOwn=true)
// and therefore the fd; on any failure path the unique_fd destructor closes the
// fd, so nothing leaks.
nn::GeneralResult<hidl_handle> createNativeHandleFrom(base::unique_fd fd,
                                                      const std::vector<int32_t>& ints) {
    // native_handle_create takes `int` counts; guard against silent truncation.
    constexpr size_t kIntMax = std::numeric_limits<int>::max();
    CHECK_LE(ints.size(), kIntMax);
    native_handle_t* nativeHandle = native_handle_create(1, static_cast<int>(ints.size()));
    if (nativeHandle == nullptr) {
        return NN_ERROR() << "Failed to create native_handle";
    }

    // Release the fd only after allocation succeeded; the handle now owns it.
    nativeHandle->data[0] = fd.release();
    std::copy(ints.begin(), ints.end(), nativeHandle->data + 1);

    hidl_handle handle;
    handle.setTo(nativeHandle, /*shouldOwn=*/true);
    return handle;
}
108
109nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Ashmem& memory) {
110 auto fd = NN_TRY(nn::dupFd(memory.fd));
111 auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {}));
112 return hidl_memory("ashmem", std::move(handle), memory.size);
113}
114
115nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Fd& memory) {
116 auto fd = NN_TRY(nn::dupFd(memory.fd));
117
118 const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset);
119 const std::vector<int> ints = {memory.prot, lowOffsetBits, highOffsetBits};
120
121 auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints));
122 return hidl_memory("mmap_fd", std::move(handle), memory.size);
123}
124
// Converts an AHardwareBuffer-backed canonical memory into a hidl_memory.
// BLOB-format buffers are named "hardware_buffer_blob" and report their width
// as the size; any other format is named "hardware_buffer" with size 0, since
// a non-BLOB buffer's byte size is not derivable from its description here.
nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) {
    const auto* ahwb = memory.handle.get();
    AHardwareBuffer_Desc bufferDesc;
    AHardwareBuffer_describe(ahwb, &bufferDesc);

    const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB;
    const size_t size = isBlob ? bufferDesc.width : 0;
    const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer";

    // The native handle returned here is still owned by the AHardwareBuffer.
    // NOTE(review): the copy below relies on hidl_handle's copy semantics
    // duplicating the underlying handle so the returned hidl_memory does not
    // dangle if the AHardwareBuffer is released first -- confirm against libhidl.
    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
    const hidl_handle hidlHandle(nativeHandle);
    hidl_handle copiedHandle(hidlHandle);

    return hidl_memory(name, std::move(copiedHandle), size);
}
140
141nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Unknown& memory) {
142 return hidl_memory(memory.name, NN_TRY(hidlHandleFromSharedHandle(memory.handle)), memory.size);
143}
144
Michael Butlerb98aa6d2020-02-22 22:37:59 -0800145} // anonymous namespace
146
// Builds an operand-performance table matching Android P's behavior, where most
// data types were reported with the same performance as TENSOR_QUANT8_ASYMM and
// only the float32 types carried their own number.
nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP(
        const nn::Capabilities::PerformanceInfo& float32Performance,
        const nn::Capabilities::PerformanceInfo& quantized8Performance) {
    // In Android P, most data types are treated as having the same performance as
    // TENSOR_QUANT8_ASYMM. This collection must be in sorted order (by operand
    // type); do not reorder entries when editing.
    std::vector<nn::Capabilities::OperandPerformance> operandPerformances = {
            {.type = nn::OperandType::FLOAT32, .info = float32Performance},
            {.type = nn::OperandType::INT32, .info = quantized8Performance},
            {.type = nn::OperandType::UINT32, .info = quantized8Performance},
            {.type = nn::OperandType::TENSOR_FLOAT32, .info = float32Performance},
            {.type = nn::OperandType::TENSOR_INT32, .info = quantized8Performance},
            {.type = nn::OperandType::TENSOR_QUANT8_ASYMM, .info = quantized8Performance},
            {.type = nn::OperandType::OEM, .info = quantized8Performance},
            {.type = nn::OperandType::TENSOR_OEM_BYTE, .info = quantized8Performance},
    };
    // .value() is safe here only because the list above satisfies create()'s
    // sorted-order precondition.
    return nn::Capabilities::OperandPerformanceTable::create(std::move(operandPerformances))
            .value();
}
165
166bool hasNoPointerData(const nn::Model& model) {
167 return hasNoPointerData(model.main) && hasNoPointerData(model.referenced);
168}
169
170bool hasNoPointerData(const nn::Request& request) {
171 return hasNoPointerData(request.inputs) && hasNoPointerData(request.outputs);
172}
173
// Ensures `model` carries no POINTER-lifetime constant data before crossing the
// HIDL boundary. If the model is already pointer-free, returns a reference to
// the original model and leaves `*maybeModelInSharedOut` untouched. Otherwise
// copies the model, relocates every pointer-backed constant into a newly
// appended shared-memory pool, stores the copy in `*maybeModelInSharedOut`,
// and returns a reference to that stored copy. The returned reference is valid
// only while `*maybeModelInSharedOut` is alive and unmodified.
nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
        const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut) {
    CHECK(model != nullptr);
    CHECK(maybeModelInSharedOut != nullptr);

    if (hasNoPointerData(*model)) {
        return *model;
    }

    // Make a copy of the model in order to make modifications. The modified model is returned to
    // the caller through `maybeModelInSharedOut` if the function succeeds.
    nn::Model modelInShared = *model;

    // Gather pointer-backed constants from the main subgraph and every
    // referenced subgraph into one constant-memory pool.
    nn::ConstantMemoryBuilder memoryBuilder(modelInShared.pools.size());
    copyPointersToSharedMemory(&modelInShared.main, &memoryBuilder);
    std::for_each(modelInShared.referenced.begin(), modelInShared.referenced.end(),
                  [&memoryBuilder](auto& subgraph) {
                      copyPointersToSharedMemory(&subgraph, &memoryBuilder);
                  });

    // Only append a pool if at least one operand was actually relocated.
    if (!memoryBuilder.empty()) {
        auto memory = NN_TRY(memoryBuilder.finish());
        modelInShared.pools.push_back(std::move(memory));
    }

    // Return a reference to the model stored in the caller-owned optional (not
    // the local copy) so the reference outlives this function.
    *maybeModelInSharedOut = modelInShared;
    return **maybeModelInSharedOut;
}
202
// Flushes pending input data: copies each recorded relocation's bytes from the
// caller's original raw pointer into the mapped shared-memory pool, so the
// driver observes the latest input contents before execution.
template <>
void InputRelocationTracker::flush() const {
    // Copy from pointers to shared memory. For inputs the mapping is writable,
    // so the pointer variant is accessed directly as void*.
    uint8_t* memoryPtr = static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
    for (const auto& [data, length, offset] : kRelocationInfos) {
        std::memcpy(memoryPtr + offset, data, length);
    }
}
211
// Flushes pending output data: copies each recorded relocation's bytes from the
// shared-memory pool back to the caller's original raw pointer, so results
// become visible to the caller after execution.
template <>
void OutputRelocationTracker::flush() const {
    // Copy from shared memory to pointers. The mapping may hold either a const
    // or non-const pointer, so visit normalizes it to const void*.
    const uint8_t* memoryPtr = static_cast<const uint8_t*>(
            std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, kMapping.pointer));
    for (const auto& [data, length, offset] : kRelocationInfos) {
        std::memcpy(data, memoryPtr + offset, length);
    }
}
221
// Ensures `request` carries no POINTER-lifetime arguments before crossing the
// HIDL boundary. Pointer-backed inputs/outputs are relocated into two new
// shared-memory pools (inputs and outputs separately), each allocated with the
// given `alignment` and `padding`. If the request is already pointer-free, the
// original request is returned and the out-parameters are left untouched.
// Otherwise the modified copy is stored in `*maybeRequestInSharedOut`, the
// relocation trackers (used to flush data across the pointer/pool boundary)
// are stored in `*relocationOut`, and a reference to the stored copy is
// returned. That reference is valid only while `*maybeRequestInSharedOut`
// is alive and unmodified.
nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
        const nn::Request* request, uint32_t alignment, uint32_t padding,
        std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut) {
    CHECK(request != nullptr);
    CHECK(maybeRequestInSharedOut != nullptr);
    CHECK(relocationOut != nullptr);

    if (hasNoPointerData(*request)) {
        return *request;
    }

    // Make a copy of the request in order to make modifications. The modified request is returned
    // to the caller through `maybeRequestInSharedOut` if the function succeeds.
    nn::Request requestInShared = *request;

    RequestRelocation relocation;

    // Change input pointers to shared memory.
    nn::MutableMemoryBuilder inputBuilder(requestInShared.pools.size());
    std::vector<InputRelocationInfo> inputRelocationInfos;
    for (auto& input : requestInShared.inputs) {
        const auto& location = input.location;
        if (input.lifetime != nn::Request::Argument::LifeTime::POINTER) {
            continue;
        }

        input.lifetime = nn::Request::Argument::LifeTime::POOL;
        // Inputs may hold either const or non-const pointers; normalize via visit.
        const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
                                      location.pointer);
        CHECK(data != nullptr);
        input.location = inputBuilder.append(location.length, alignment, padding);
        // Record where the caller's bytes must be copied to before execution.
        inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
    }

    // Allocate input memory.
    if (!inputBuilder.empty()) {
        auto memory = NN_TRY(inputBuilder.finish());
        // `memory` is intentionally copied into pools and moved into the
        // tracker: both need ownership of the same shared memory.
        requestInShared.pools.push_back(memory);
        relocation.input = NN_TRY(
                InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory)));
    }

    // Change output pointers to shared memory.
    nn::MutableMemoryBuilder outputBuilder(requestInShared.pools.size());
    std::vector<OutputRelocationInfo> outputRelocationInfos;
    for (auto& output : requestInShared.outputs) {
        const auto& location = output.location;
        if (output.lifetime != nn::Request::Argument::LifeTime::POINTER) {
            continue;
        }

        output.lifetime = nn::Request::Argument::LifeTime::POOL;
        // Outputs must be writable, so only the non-const pointer alternative is valid.
        void* data = std::get<void*>(location.pointer);
        CHECK(data != nullptr);
        output.location = outputBuilder.append(location.length, alignment, padding);
        // Record where results must be copied back to after execution.
        outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
    }

    // Allocate output memory.
    if (!outputBuilder.empty()) {
        auto memory = NN_TRY(outputBuilder.finish());
        requestInShared.pools.push_back(memory);
        relocation.output = NN_TRY(OutputRelocationTracker::create(std::move(outputRelocationInfos),
                                                                   std::move(memory)));
    }

    // Return a reference to the request stored in the caller-owned optional
    // (not the local copy) so the reference outlives this function.
    *maybeRequestInSharedOut = requestInShared;
    *relocationOut = std::move(relocation);
    return **maybeRequestInSharedOut;
}
292
Michael Butler68b69262021-02-09 15:36:11 -0800293nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
294 size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
295 return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
Michael Butlerb98aa6d2020-02-22 22:37:59 -0800296}
297
Michael Butlerab2f4822021-02-08 00:05:07 -0800298nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {
299 if (memory == nullptr) {
300 return NN_ERROR() << "Memory must be non-empty";
301 }
Michael Butlerbed23d92021-03-25 15:27:38 -0700302 return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle);
Michael Butlerab2f4822021-02-08 00:05:07 -0800303}
304
// Rounds `value` up to the nearest multiple of `multiple` (must be non-zero).
// Uses the remainder form instead of `(value + multiple - 1) / multiple * multiple`
// so the intermediate sum cannot wrap around when `value` is close to
// UINT32_MAX; the old form could return a wrapped (too-small) result for
// non-power-of-two multiples even when the true answer fits in uint32_t.
// The result is still undefined-by-contract if the true rounded value
// exceeds UINT32_MAX.
static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
    const uint32_t remainder = value % multiple;
    return remainder == 0 ? value : value + (multiple - remainder);
}
308
// Converts a hidl_memory into a canonical nn::SharedMemory. Recognizes the
// "ashmem", "mmap_fd", and "hardware_buffer_blob" memory names; any other name
// is preserved opaquely as nn::Memory::Unknown. File descriptors are duplicated
// (or the AHWB handle cloned), so the result owns its resources independently
// of `memory`.
nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
    // hidl_memory reports a 64-bit size; guard the narrowing to size_t
    // (only meaningful on 32-bit builds).
    CHECK_LE(memory.size(), std::numeric_limits<size_t>::max());
    if (!memory.valid()) {
        return NN_ERROR() << "Unable to convert invalid hidl_memory";
    }

    // "ashmem": handle layout is exactly one fd and no ints.
    if (memory.name() == "ashmem") {
        if (memory.handle()->numFds != 1) {
            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
                              << memory.handle()->numFds << " numFds, but expected 1";
        }
        if (memory.handle()->numInts != 0) {
            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
                              << memory.handle()->numInts << " numInts, but expected 0";
        }
        auto handle = nn::Memory::Ashmem{
                .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])),
                .size = static_cast<size_t>(memory.size()),
        };
        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
    }

    // "mmap_fd": handle layout is one fd plus three ints
    // (prot, low offset bits, high offset bits) -- see createHidlMemoryFrom.
    if (memory.name() == "mmap_fd") {
        if (memory.handle()->numFds != 1) {
            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
                              << memory.handle()->numFds << " numFds, but expected 1";
        }
        if (memory.handle()->numInts != 3) {
            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
                              << memory.handle()->numInts << " numInts, but expected 3";
        }

        const int fd = memory.handle()->data[0];
        const int prot = memory.handle()->data[1];
        const int lower = memory.handle()->data[2];
        const int higher = memory.handle()->data[3];
        const size_t offset = nn::getOffsetFromInts(lower, higher);

        return nn::createSharedMemoryFromFd(static_cast<size_t>(memory.size()), prot, fd, offset);
    }

    // Anything other than a BLOB-mode AHardwareBuffer is passed through opaquely.
    if (memory.name() != "hardware_buffer_blob") {
        auto handle = nn::Memory::Unknown{
                .handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())),
                .size = static_cast<size_t>(memory.size()),
                .name = memory.name(),
        };
        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
    }

    // "hardware_buffer_blob": reconstruct a BLOB-mode AHardwareBuffer around the
    // handle. For BLOB buffers, width is the byte size.
    const auto size = memory.size();
    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
    const uint32_t width = size;
    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.

    // AHardwareBuffer_createFromHandle() might fail because an allocator
    // expects a specific stride value. In that case, we try to guess it by
    // aligning the width to small powers of 2.
    // TODO(b/174120849): Avoid stride assumptions.
    AHardwareBuffer* hardwareBuffer = nullptr;
    status_t status = UNKNOWN_ERROR;
    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
        const uint32_t stride = roundUpToMultiple(width, alignment);
        AHardwareBuffer_Desc desc{
                .width = width,
                .height = height,
                .layers = layers,
                .format = format,
                .usage = usage,
                .stride = stride,
        };
        // CLONE keeps `memory` usable: the new AHardwareBuffer owns a copy of
        // the handle, not the original.
        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                                  &hardwareBuffer);
        if (status == NO_ERROR) {
            break;
        }
    }
    if (status != NO_ERROR) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "Can't create AHardwareBuffer from handle. Error: " << status;
    }

    // Ownership of hardwareBuffer transfers to the canonical memory object.
    return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true);
}
396
// Deep-copies a canonical nn::Handle into a hidl_handle, duplicating every file
// descriptor so the result owns its fds independently of `handle`. Layout of
// the created native_handle_t: data[0..numFds) are the duplicated fds,
// data[numFds..numFds+numInts) are the ints.
nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle) {
    // Hold the duplicated fds in unique_fds first so that any early return
    // below closes them automatically.
    std::vector<base::unique_fd> fds;
    fds.reserve(handle.fds.size());
    for (const auto& fd : handle.fds) {
        const int dupFd = dup(fd);
        if (dupFd == -1) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    // native_handle_create takes `int` counts; guard against silent truncation.
    constexpr size_t kIntMax = std::numeric_limits<int>::max();
    CHECK_LE(handle.fds.size(), kIntMax);
    CHECK_LE(handle.ints.size(), kIntMax);
    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
                                                         static_cast<int>(handle.ints.size()));
    if (nativeHandle == nullptr) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
    }
    // Release the fds only after native_handle_create succeeded; from here on
    // the native handle owns them.
    for (size_t i = 0; i < fds.size(); ++i) {
        nativeHandle->data[i] = fds[i].release();
    }
    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);

    hidl_handle hidlHandle;
    hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
    return hidlHandle;
}
425
Michael Butlerab2f4822021-02-08 00:05:07 -0800426nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
Slava Shklyaev49817a02020-10-27 18:44:01 +0000427 if (handle == nullptr) {
Michael Butlerab2f4822021-02-08 00:05:07 -0800428 return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle is nullptr";
Slava Shklyaev49817a02020-10-27 18:44:01 +0000429 }
430
431 std::vector<base::unique_fd> fds;
432 fds.reserve(handle->numFds);
433 for (int i = 0; i < handle->numFds; ++i) {
Michael Butlerab2f4822021-02-08 00:05:07 -0800434 const int dupFd = dup(handle->data[i]);
Slava Shklyaev49817a02020-10-27 18:44:01 +0000435 if (dupFd == -1) {
436 return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
437 }
438 fds.emplace_back(dupFd);
439 }
440
441 std::vector<int> ints(&handle->data[handle->numFds],
442 &handle->data[handle->numFds + handle->numInts]);
443
Michael Butlerab2f4822021-02-08 00:05:07 -0800444 return nn::Handle{.fds = std::move(fds), .ints = std::move(ints)};
Slava Shklyaev49817a02020-10-27 18:44:01 +0000445}
446
447nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
448 const std::vector<nn::SyncFence>& syncFences) {
449 hidl_vec<hidl_handle> handles(syncFences.size());
450 for (size_t i = 0; i < syncFences.size(); ++i) {
Michael Butlerab2f4822021-02-08 00:05:07 -0800451 const auto& handle = syncFences[i].getSharedHandle();
452 if (handle == nullptr) {
453 return NN_ERROR() << "convertSyncFences failed because sync fence is empty";
454 }
455 handles[i] = NN_TRY(hidlHandleFromSharedHandle(*handle));
Slava Shklyaev49817a02020-10-27 18:44:01 +0000456 }
457 return handles;
458}
459
Michael Butlerb98aa6d2020-02-22 22:37:59 -0800460} // namespace android::hardware::neuralnetworks::utils