blob: 98a75fa7853e32c1a4e08759b76c6ee7f391831e [file] [log] [blame]
Michael Butlerf6b2d1a2020-12-19 14:44:35 -08001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "ExecutionBurstController"
18
19#include "ExecutionBurstController.h"
Michael Butler76e491f2020-12-19 01:55:32 -080020#include "ExecutionBurstUtils.h"
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080021
22#include <android-base/logging.h>
Michael Butler76e491f2020-12-19 01:55:32 -080023#include <android-base/thread_annotations.h>
24#include <nnapi/IBurst.h>
25#include <nnapi/IPreparedModel.h>
26#include <nnapi/Result.h>
27#include <nnapi/TypeUtils.h>
28#include <nnapi/Types.h>
29#include <nnapi/Validation.h>
30#include <nnapi/hal/1.0/Conversions.h>
Michael Butlere8645c32021-10-15 18:42:32 -070031#include <nnapi/hal/1.0/ProtectCallback.h>
Xusong Wangb2e80852021-03-23 15:07:10 -070032#include <nnapi/hal/CommonUtils.h>
Michael Butler76e491f2020-12-19 01:55:32 -080033#include <nnapi/hal/HandleError.h>
Michael Butler76e491f2020-12-19 01:55:32 -080034#include <nnapi/hal/TransferValue.h>
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080035
36#include <algorithm>
37#include <cstring>
38#include <limits>
39#include <memory>
40#include <string>
Michael Butler76e491f2020-12-19 01:55:32 -080041#include <thread>
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080042#include <tuple>
43#include <utility>
44#include <vector>
45
Michael Butler76e491f2020-12-19 01:55:32 -080046#include "Callbacks.h"
47#include "Conversions.h"
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080048#include "Tracing.h"
49#include "Utils.h"
50
Michael Butler76e491f2020-12-19 01:55:32 -080051namespace android::hardware::neuralnetworks::V1_2::utils {
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080052namespace {
53
Xusong Wangb2e80852021-03-23 15:07:10 -070054class BurstExecution final : public nn::IExecution,
55 public std::enable_shared_from_this<BurstExecution> {
56 struct PrivateConstructorTag {};
57
58 public:
59 static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
60 std::shared_ptr<const ExecutionBurstController> controller,
61 std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
62 std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
63
64 BurstExecution(PrivateConstructorTag tag,
65 std::shared_ptr<const ExecutionBurstController> controller,
66 std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
67 std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
68
69 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
70 const nn::OptionalTimePoint& deadline) const override;
71
72 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
73 const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
74 const nn::OptionalDuration& timeoutDurationAfterFence) const override;
75
76 private:
77 const std::shared_ptr<const ExecutionBurstController> kController;
78 const std::vector<FmqRequestDatum> kRequest;
79 const hal::utils::RequestRelocation kRelocation;
80 const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
81};
82
Michael Butler76e491f2020-12-19 01:55:32 -080083nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
84 V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
85 HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status "
86 << toString(status);
87 if (burstContext == nullptr) {
88 return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
89 << "IPreparedModel::configureExecutionBurst returned nullptr for burst";
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080090 }
Michael Butler76e491f2020-12-19 01:55:32 -080091 return burstContext;
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080092}
93
Michael Butler76e491f2020-12-19 01:55:32 -080094nn::GeneralResult<hidl_vec<hidl_memory>> getMemoriesHelper(
95 const hidl_vec<int32_t>& slots,
96 const std::shared_ptr<ExecutionBurstController::MemoryCache>& memoryCache) {
97 hidl_vec<hidl_memory> memories(slots.size());
98 for (size_t i = 0; i < slots.size(); ++i) {
99 const int32_t slot = slots[i];
100 const auto memory = NN_TRY(memoryCache->getMemory(slot));
101 memories[i] = NN_TRY(V1_0::utils::unvalidatedConvert(memory));
102 if (!memories[i].valid()) {
103 return NN_ERROR() << "memory at slot " << slot << " is invalid";
104 }
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800105 }
Michael Butler76e491f2020-12-19 01:55:32 -0800106 return memories;
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800107}
108
Michael Butler76e491f2020-12-19 01:55:32 -0800109} // namespace
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800110
Michael Butler76e491f2020-12-19 01:55:32 -0800111// MemoryCache methods
112
113ExecutionBurstController::MemoryCache::MemoryCache() {
114 constexpr size_t kPreallocatedCount = 1024;
115 std::vector<int32_t> freeSlotsSpace;
116 freeSlotsSpace.reserve(kPreallocatedCount);
117 mFreeSlots = std::stack<int32_t, std::vector<int32_t>>(std::move(freeSlotsSpace));
118 mMemoryCache.reserve(kPreallocatedCount);
119 mCacheCleaner.reserve(kPreallocatedCount);
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800120}
121
Michael Butler76e491f2020-12-19 01:55:32 -0800122void ExecutionBurstController::MemoryCache::setBurstContext(sp<IBurstContext> burstContext) {
123 std::lock_guard guard(mMutex);
124 mBurstContext = std::move(burstContext);
125}
126
// Returns the slot associated with `memory`, caching it first if necessary.
// The returned SharedCleanup keeps the cache entry alive: once the last
// reference to it is dropped, the entry is freed via freeMemory().
std::pair<int32_t, ExecutionBurstController::MemoryCache::SharedCleanup>
ExecutionBurstController::MemoryCache::cacheMemory(const nn::SharedMemory& memory) {
    std::unique_lock lock(mMutex);
    base::ScopedLockAssertion lockAssert(mMutex);

    // Use existing cache entry if (1) the Memory object is in the cache and (2) the cache entry is
    // not currently being freed.
    auto iter = mMemoryIdToSlot.find(memory);
    while (iter != mMemoryIdToSlot.end()) {
        const int32_t slot = iter->second;
        // lock() succeeds only while some SharedCleanup still keeps the entry alive.
        if (auto cleaner = mCacheCleaner.at(slot).lock()) {
            return std::make_pair(slot, std::move(cleaner));
        }

        // If the code reaches this point, the Memory object was in the cache, but is currently
        // being destroyed. This code waits until the cache entry has been freed, then loops to
        // ensure the cache entry has been freed or has been made present by another thread.
        mCond.wait(lock);
        iter = mMemoryIdToSlot.find(memory);
    }

    // Allocate a new cache entry.
    const int32_t slot = allocateSlotLocked();
    mMemoryIdToSlot[memory] = slot;
    mMemoryCache[slot] = memory;

    // Create reference-counted self-cleaning cache object. The cleanup task
    // captures only a weak_ptr to this MemoryCache so the cleaner does not
    // extend the cache's lifetime (and is a no-op if the cache is gone).
    auto self = weak_from_this();
    Task cleanup = [memory, memoryCache = std::move(self)] {
        if (const auto lock = memoryCache.lock()) {
            lock->freeMemory(memory);
        }
    };
    auto cleaner = std::make_shared<const Cleanup>(std::move(cleanup));
    mCacheCleaner[slot] = cleaner;

    return std::make_pair(slot, std::move(cleaner));
}
165
Michael Butler76e491f2020-12-19 01:55:32 -0800166nn::GeneralResult<nn::SharedMemory> ExecutionBurstController::MemoryCache::getMemory(int32_t slot) {
167 std::lock_guard guard(mMutex);
168 if (slot < 0 || static_cast<size_t>(slot) >= mMemoryCache.size()) {
169 return NN_ERROR() << "Invalid slot: " << slot << " vs " << mMemoryCache.size();
170 }
171 return mMemoryCache[slot];
172}
173
174void ExecutionBurstController::MemoryCache::freeMemory(const nn::SharedMemory& memory) {
175 {
176 std::lock_guard guard(mMutex);
177 const int32_t slot = mMemoryIdToSlot.at(memory);
178 if (mBurstContext) {
Michael Butler1b09ea92021-10-13 11:28:26 -0700179 const auto ret = mBurstContext->freeMemory(slot);
180 if (!ret.isOk()) {
181 LOG(ERROR) << "IBustContext::freeMemory failed: " << ret.description();
182 }
Michael Butler76e491f2020-12-19 01:55:32 -0800183 }
184 mMemoryIdToSlot.erase(memory);
185 mMemoryCache[slot] = {};
186 mCacheCleaner[slot].reset();
187 mFreeSlots.push(slot);
188 }
189 mCond.notify_all();
190}
191
192int32_t ExecutionBurstController::MemoryCache::allocateSlotLocked() {
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800193 constexpr size_t kMaxNumberOfSlots = std::numeric_limits<int32_t>::max();
194
Michael Butler76e491f2020-12-19 01:55:32 -0800195 // If there is a free slot, use it.
196 if (!mFreeSlots.empty()) {
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800197 const int32_t slot = mFreeSlots.top();
198 mFreeSlots.pop();
199 return slot;
200 }
201
Michael Butler76e491f2020-12-19 01:55:32 -0800202 // Use a slot for the first time.
203 CHECK_LT(mMemoryCache.size(), kMaxNumberOfSlots) << "Exceeded maximum number of slots!";
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800204 const int32_t slot = static_cast<int32_t>(mMemoryCache.size());
205 mMemoryCache.emplace_back();
Michael Butler76e491f2020-12-19 01:55:32 -0800206 mCacheCleaner.emplace_back();
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800207
208 return slot;
209}
210
Michael Butler76e491f2020-12-19 01:55:32 -0800211// ExecutionBurstCallback methods
212
// The callback stores only a weak reference to the MemoryCache (see
// getMemories), so the service-side callback cannot keep the cache alive.
// A null cache is a programming error, hence the CHECK.
ExecutionBurstController::ExecutionBurstCallback::ExecutionBurstCallback(
        const std::shared_ptr<MemoryCache>& memoryCache)
    : kMemoryCache(memoryCache) {
    CHECK(memoryCache != nullptr);
}
218
219Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
220 const hidl_vec<int32_t>& slots, getMemories_cb cb) {
221 const auto memoryCache = kMemoryCache.lock();
222 if (memoryCache == nullptr) {
223 LOG(ERROR) << "ExecutionBurstController::ExecutionBurstCallback::getMemories called after "
224 "the MemoryCache has been freed";
225 cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
226 return Void();
227 }
228
229 const auto maybeMemories = getMemoriesHelper(slots, memoryCache);
230 if (!maybeMemories.has_value()) {
231 const auto& [message, code] = maybeMemories.error();
232 LOG(ERROR) << "ExecutionBurstController::ExecutionBurstCallback::getMemories failed with "
233 << code << ": " << message;
234 cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
235 return Void();
236 }
237
238 cb(V1_0::ErrorStatus::NONE, maybeMemories.value());
239 return Void();
240}
241
242// ExecutionBurstController methods
243
// Creates a burst controller for `hidlPreparedModel`:
//   1) creates the request (sender) and result (receiver) FMQ channels,
//   2) registers an ExecutionBurstCallback so the service can resolve cached
//      memories by slot,
//   3) calls IPreparedModel::configureExecutionBurst to obtain the
//      IBurstContext, and
//   4) installs a death handler that unblocks both FMQ endpoints if the
//      service dies.
// `preparedModel` is retained as the fallback execution path.
nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
        nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
        std::chrono::microseconds pollingTimeWindow) {
    // check inputs
    if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
        return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
    }

    // create FMQ objects
    auto [requestChannelSender, requestChannelDescriptor] =
            NN_TRY(RequestChannelSender::create(kExecutionBurstChannelLength));
    auto [resultChannelReceiver, resultChannelDescriptor] =
            NN_TRY(ResultChannelReceiver::create(kExecutionBurstChannelLength, pollingTimeWindow));

    // check FMQ objects
    CHECK(requestChannelSender != nullptr);
    CHECK(requestChannelDescriptor != nullptr);
    CHECK(resultChannelReceiver != nullptr);
    CHECK(resultChannelDescriptor != nullptr);

    // create memory cache
    auto memoryCache = std::make_shared<MemoryCache>();

    // create callback object
    auto burstCallback = sp<ExecutionBurstCallback>::make(memoryCache);
    auto cb = hal::utils::CallbackValue(executionBurstResultCallback);

    // configure burst: the service adopts both FMQ descriptors and returns the
    // burst context through `cb`
    const Return<void> ret = hidlPreparedModel->configureExecutionBurst(
            burstCallback, *requestChannelDescriptor, *resultChannelDescriptor, cb);
    HANDLE_TRANSPORT_FAILURE(ret);

    auto burstContext = NN_TRY(cb.take());
    memoryCache->setBurstContext(burstContext);

    // create death handler object
    auto deathHandler = NN_TRY(neuralnetworks::utils::DeathHandler::create(burstContext));
    deathHandler.protectCallbackForLifetimeOfDeathHandler(requestChannelSender.get());
    deathHandler.protectCallbackForLifetimeOfDeathHandler(resultChannelReceiver.get());

    // make and return controller
    return std::make_shared<const ExecutionBurstController>(
            PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
            std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
            std::move(memoryCache), std::move(deathHandler));
}
290
// Private constructor; instances must be obtained via
// ExecutionBurstController::create, which performs all validation and setup.
ExecutionBurstController::ExecutionBurstController(
        PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
        std::unique_ptr<RequestChannelSender> requestChannelSender,
        std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
        sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
        std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
    : kPreparedModel(std::move(preparedModel)),
      mRequestChannelSender(std::move(requestChannelSender)),
      mResultChannelReceiver(std::move(resultChannelReceiver)),
      mBurstCallback(std::move(callback)),
      mBurstContext(std::move(burstContext)),
      mMemoryCache(std::move(memoryCache)),
      kDeathHandler(std::move(deathHandler)) {}
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800304
Michael Butler76e491f2020-12-19 01:55:32 -0800305ExecutionBurstController::OptionalCacheHold ExecutionBurstController::cacheMemory(
306 const nn::SharedMemory& memory) const {
307 auto [slot, hold] = mMemoryCache->cacheMemory(memory);
308 return hold;
309}
310
// Executes `request` over the burst FMQ channels. Falls back to the regular
// IPreparedModel::execute path when the request needs a feature level newer
// than the burst protocol supports, or when the serialized packet cannot be
// sent over the FMQ.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming measure,
                                  const nn::OptionalTimePoint& deadline,
                                  const nn::OptionalDuration& loopTimeoutDuration) const {
    // This is the first point when we know an execution is occurring, so begin to collect
    // systraces. Note that the first point we can begin collecting systraces in
    // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
    // ExecutionBurstServer collects systraces at different points in the code.
    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");

    // if the request is valid but of a higher version than what's supported in burst execution,
    // fall back to another execution path
    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
        // the generic execute path supports newer request versions
        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
    }

    // ensure that request is ready for IPC; `relocation` records any
    // pointer-backed arguments that must be flushed into/out of shared memory
    std::optional<nn::Request> maybeRequestInShared;
    hal::utils::RequestRelocation relocation;
    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
            &maybeRequestInShared, &relocation));

    // clear pools field of request, as they will be provided via slots
    const auto requestWithoutPools = nn::Request{
            .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
    const auto hidlMeasure = NN_TRY(convert(measure));

    // cache every memory pool and collect its slot; `holds` keeps the cache
    // entries alive for the duration of the execution
    std::vector<int32_t> slots;
    std::vector<OptionalCacheHold> holds;
    slots.reserve(requestInShared.pools.size());
    holds.reserve(requestInShared.pools.size());
    for (const auto& memoryPool : requestInShared.pools) {
        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
        slots.push_back(slot);
        holds.push_back(std::move(hold));
    }

    // send request packet; `fallback` lets executeInternal retry on the
    // regular execute path if the FMQ send fails
    const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
    const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
    };
    return executeInternal(requestPacket, relocation, fallback);
}
358
// See IBurst::createReusableExecution for information on this method.
// Validates, relocates, and serializes the request once up front (mirroring
// execute()), then wraps the resulting FMQ packet in a BurstExecution that can
// be computed repeatedly without re-serialization.
nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");

    // if the request is valid but of a higher version than what's supported in burst execution,
    // fall back to another execution path
    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
        // the generic reusable-execution path supports newer request versions
        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
    }

    // ensure that request is ready for IPC; `relocation` records any
    // pointer-backed arguments that must be flushed into/out of shared memory
    std::optional<nn::Request> maybeRequestInShared;
    hal::utils::RequestRelocation relocation;
    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
            &maybeRequestInShared, &relocation));

    // clear pools field of request, as they will be provided via slots
    const auto requestWithoutPools = nn::Request{
            .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
    const auto hidlMeasure = NN_TRY(convert(measure));

    // cache every memory pool; the holds are transferred to the BurstExecution
    // so slots stay valid for its whole lifetime
    std::vector<int32_t> slots;
    std::vector<OptionalCacheHold> holds;
    slots.reserve(requestInShared.pools.size());
    holds.reserve(requestInShared.pools.size());
    for (const auto& memoryPool : requestInShared.pools) {
        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
        slots.push_back(slot);
        holds.push_back(std::move(hold));
    }

    const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
    return BurstExecution::create(shared_from_this(), std::move(requestPacket),
                                  std::move(relocation), std::move(holds));
}
399
// Sends a pre-serialized request packet over the request FMQ and blocks until
// the result packet arrives on the result FMQ. At most one execution may be in
// flight on a burst at a time. If the packet cannot be sent and `fallback` is
// non-null, the fallback is invoked instead of failing.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
                                          const hal::utils::RequestRelocation& relocation,
                                          FallbackFunction fallback) const {
    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
                 "ExecutionBurstController::executeInternal");

    // Ensure that at most one execution is in flight at any given time.
    const bool alreadyInFlight = mExecutionInFlight.test_and_set();
    if (alreadyInFlight) {
        return NN_ERROR() << "IBurst already has an execution in flight";
    }
    // Clear the in-flight flag on every exit path.
    const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); });

    // Flush pointer-backed input data into the shared memory the service reads.
    if (relocation.input) {
        relocation.input->flush();
    }

    // send request packet
    const auto sendStatus = mRequestChannelSender->sendPacket(requestPacket);
    if (!sendStatus.ok()) {
        // fallback to another execution path if the packet could not be sent
        if (fallback) {
            return fallback();
        }
        return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
    }

    // get result packet (blocks until the service responds or the channel dies)
    const auto [status, outputShapes, timing] = NN_TRY(mResultChannelReceiver->getBlocking());

    // Flush output data written by the service back to pointer-backed arguments.
    if (relocation.output) {
        relocation.output->flush();
    }
    return executionCallback(status, outputShapes, timing);
}
436
Xusong Wangb2e80852021-03-23 15:07:10 -0700437nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
438 std::shared_ptr<const ExecutionBurstController> controller,
439 std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
440 std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
441 if (controller == nullptr) {
442 return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
443 }
444
445 return std::make_shared<const BurstExecution>(PrivateConstructorTag{}, std::move(controller),
446 std::move(request), std::move(relocation),
447 std::move(cacheHolds));
448}
449
// Private constructor; instances must be obtained via BurstExecution::create,
// which validates the controller.
BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
                               std::shared_ptr<const ExecutionBurstController> controller,
                               std::vector<FmqRequestDatum> request,
                               hal::utils::RequestRelocation relocation,
                               std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
    : kController(std::move(controller)),
      kRequest(std::move(request)),
      kRelocation(std::move(relocation)),
      kCacheHolds(std::move(cacheHolds)) {}
459
460nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
461 const nn::OptionalTimePoint& /*deadline*/) const {
462 return kController->executeInternal(kRequest, kRelocation, /*fallback=*/nullptr);
463}
464
465nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
466BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
467 const nn::OptionalTimePoint& /*deadline*/,
468 const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
469 return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
470 << "IExecution::computeFenced is not supported on burst object";
471}
472
Michael Butler76e491f2020-12-19 01:55:32 -0800473} // namespace android::hardware::neuralnetworks::V1_2::utils