blob: 911fbfa9810d4de2119b99aba26205f4c94bcbfb [file] [log] [blame]
Michael Butlerf6b2d1a2020-12-19 14:44:35 -08001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Michael Butler137ee992021-11-01 16:40:31 -070017#include "Burst.h"
18#include "BurstUtils.h"
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080019
20#include <android-base/logging.h>
Michael Butler76e491f2020-12-19 01:55:32 -080021#include <android-base/thread_annotations.h>
22#include <nnapi/IBurst.h>
23#include <nnapi/IPreparedModel.h>
24#include <nnapi/Result.h>
25#include <nnapi/TypeUtils.h>
26#include <nnapi/Types.h>
27#include <nnapi/Validation.h>
28#include <nnapi/hal/1.0/Conversions.h>
Michael Butler49d95e02021-10-15 18:52:52 -070029#include <nnapi/hal/1.0/HandleError.h>
Michael Butlere8645c32021-10-15 18:42:32 -070030#include <nnapi/hal/1.0/ProtectCallback.h>
Xusong Wangb2e80852021-03-23 15:07:10 -070031#include <nnapi/hal/CommonUtils.h>
Michael Butler76e491f2020-12-19 01:55:32 -080032#include <nnapi/hal/TransferValue.h>
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080033
34#include <algorithm>
35#include <cstring>
36#include <limits>
37#include <memory>
38#include <string>
Michael Butler76e491f2020-12-19 01:55:32 -080039#include <thread>
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080040#include <tuple>
41#include <utility>
42#include <vector>
43
Michael Butler76e491f2020-12-19 01:55:32 -080044#include "Callbacks.h"
45#include "Conversions.h"
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080046#include "Tracing.h"
47#include "Utils.h"
48
Michael Butler76e491f2020-12-19 01:55:32 -080049namespace android::hardware::neuralnetworks::V1_2::utils {
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080050namespace {
51
// Reusable burst execution object (see IBurst::createReusableExecution). It stores a
// pre-serialized FMQ request packet plus the cache holds that pin the request's memory
// pools in the Burst's memory cache for as long as this execution object is alive.
class BurstExecution final : public nn::IExecution,
                             public std::enable_shared_from_this<BurstExecution> {
    struct PrivateConstructorTag {};

  public:
    // Validates arguments and creates the execution object. Returns an error if
    // `controller` is null.
    static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
            std::shared_ptr<const Burst> controller, std::vector<FmqRequestDatum> request,
            hal::utils::RequestRelocation relocation,
            std::vector<Burst::OptionalCacheHold> cacheHolds);

    // Use `create` instead; the tag restricts construction to this file.
    BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> controller,
                   std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
                   std::vector<Burst::OptionalCacheHold> cacheHolds);

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
            const nn::OptionalTimePoint& deadline) const override;

    // Fenced compute is not supported on bursts; always returns GENERAL_FAILURE.
    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
            const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

  private:
    const std::shared_ptr<const Burst> kController;
    const std::vector<FmqRequestDatum> kRequest;
    const hal::utils::RequestRelocation kRelocation;
    const std::vector<Burst::OptionalCacheHold> kCacheHolds;
};
79
Michael Butler76e491f2020-12-19 01:55:32 -080080nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
81 V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
Michael Butler49d95e02021-10-15 18:52:52 -070082 HANDLE_STATUS_HIDL(status) << "IPreparedModel::configureExecutionBurst failed with status "
83 << toString(status);
Michael Butler76e491f2020-12-19 01:55:32 -080084 if (burstContext == nullptr) {
85 return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
86 << "IPreparedModel::configureExecutionBurst returned nullptr for burst";
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080087 }
Michael Butler76e491f2020-12-19 01:55:32 -080088 return burstContext;
Michael Butlerf6b2d1a2020-12-19 14:44:35 -080089}
90
Michael Butler76e491f2020-12-19 01:55:32 -080091nn::GeneralResult<hidl_vec<hidl_memory>> getMemoriesHelper(
Michael Butler137ee992021-11-01 16:40:31 -070092 const hidl_vec<int32_t>& slots, const std::shared_ptr<Burst::MemoryCache>& memoryCache) {
Michael Butler76e491f2020-12-19 01:55:32 -080093 hidl_vec<hidl_memory> memories(slots.size());
94 for (size_t i = 0; i < slots.size(); ++i) {
95 const int32_t slot = slots[i];
96 const auto memory = NN_TRY(memoryCache->getMemory(slot));
97 memories[i] = NN_TRY(V1_0::utils::unvalidatedConvert(memory));
98 if (!memories[i].valid()) {
99 return NN_ERROR() << "memory at slot " << slot << " is invalid";
100 }
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800101 }
Michael Butler76e491f2020-12-19 01:55:32 -0800102 return memories;
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800103}
104
Michael Butler76e491f2020-12-19 01:55:32 -0800105} // namespace
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800106
Michael Butler76e491f2020-12-19 01:55:32 -0800107// MemoryCache methods
108
Michael Butler137ee992021-11-01 16:40:31 -0700109Burst::MemoryCache::MemoryCache() {
Michael Butler76e491f2020-12-19 01:55:32 -0800110 constexpr size_t kPreallocatedCount = 1024;
111 std::vector<int32_t> freeSlotsSpace;
112 freeSlotsSpace.reserve(kPreallocatedCount);
113 mFreeSlots = std::stack<int32_t, std::vector<int32_t>>(std::move(freeSlotsSpace));
114 mMemoryCache.reserve(kPreallocatedCount);
115 mCacheCleaner.reserve(kPreallocatedCount);
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800116}
117
Michael Butler137ee992021-11-01 16:40:31 -0700118void Burst::MemoryCache::setBurstContext(sp<IBurstContext> burstContext) {
Michael Butler76e491f2020-12-19 01:55:32 -0800119 std::lock_guard guard(mMutex);
120 mBurstContext = std::move(burstContext);
121}
122
// Returns the slot associated with `memory`, creating a new cache entry (and slot) if
// none exists. The returned SharedCleanup keeps the entry alive; when the last holder
// releases it, the Cleanup task invokes freeMemory() to reclaim the slot.
std::pair<int32_t, Burst::MemoryCache::SharedCleanup> Burst::MemoryCache::cacheMemory(
        const nn::SharedMemory& memory) {
    std::unique_lock lock(mMutex);
    base::ScopedLockAssertion lockAssert(mMutex);

    // Use existing cache entry if (1) the Memory object is in the cache and (2) the cache entry is
    // not currently being freed.
    auto iter = mMemoryIdToSlot.find(memory);
    while (iter != mMemoryIdToSlot.end()) {
        const int32_t slot = iter->second;
        // lock() succeeds only while at least one SharedCleanup holder still exists,
        // i.e. the entry is not mid-destruction.
        if (auto cleaner = mCacheCleaner.at(slot).lock()) {
            return std::make_pair(slot, std::move(cleaner));
        }

        // If the code reaches this point, the Memory object was in the cache, but is currently
        // being destroyed. This code waits until the cache entry has been freed, then loops to
        // ensure the cache entry has been freed or has been made present by another thread.
        mCond.wait(lock);
        iter = mMemoryIdToSlot.find(memory);
    }

    // Allocate a new cache entry.
    const int32_t slot = allocateSlotLocked();
    mMemoryIdToSlot[memory] = slot;
    mMemoryCache[slot] = memory;

    // Create reference-counted self-cleaning cache object. The task captures only a
    // weak_ptr to this cache so it does not extend the cache's lifetime.
    auto self = weak_from_this();
    Task cleanup = [memory, memoryCache = std::move(self)] {
        if (const auto lock = memoryCache.lock()) {
            lock->freeMemory(memory);
        }
    };
    auto cleaner = std::make_shared<const Cleanup>(std::move(cleanup));
    mCacheCleaner[slot] = cleaner;

    return std::make_pair(slot, std::move(cleaner));
}
161
Michael Butler137ee992021-11-01 16:40:31 -0700162nn::GeneralResult<nn::SharedMemory> Burst::MemoryCache::getMemory(int32_t slot) {
Michael Butler76e491f2020-12-19 01:55:32 -0800163 std::lock_guard guard(mMutex);
164 if (slot < 0 || static_cast<size_t>(slot) >= mMemoryCache.size()) {
165 return NN_ERROR() << "Invalid slot: " << slot << " vs " << mMemoryCache.size();
166 }
167 return mMemoryCache[slot];
168}
169
Michael Butler137ee992021-11-01 16:40:31 -0700170void Burst::MemoryCache::freeMemory(const nn::SharedMemory& memory) {
Michael Butler76e491f2020-12-19 01:55:32 -0800171 {
172 std::lock_guard guard(mMutex);
173 const int32_t slot = mMemoryIdToSlot.at(memory);
174 if (mBurstContext) {
Michael Butler1b09ea92021-10-13 11:28:26 -0700175 const auto ret = mBurstContext->freeMemory(slot);
176 if (!ret.isOk()) {
177 LOG(ERROR) << "IBustContext::freeMemory failed: " << ret.description();
178 }
Michael Butler76e491f2020-12-19 01:55:32 -0800179 }
180 mMemoryIdToSlot.erase(memory);
181 mMemoryCache[slot] = {};
182 mCacheCleaner[slot].reset();
183 mFreeSlots.push(slot);
184 }
185 mCond.notify_all();
186}
187
Michael Butler137ee992021-11-01 16:40:31 -0700188int32_t Burst::MemoryCache::allocateSlotLocked() {
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800189 constexpr size_t kMaxNumberOfSlots = std::numeric_limits<int32_t>::max();
190
Michael Butler76e491f2020-12-19 01:55:32 -0800191 // If there is a free slot, use it.
192 if (!mFreeSlots.empty()) {
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800193 const int32_t slot = mFreeSlots.top();
194 mFreeSlots.pop();
195 return slot;
196 }
197
Michael Butler76e491f2020-12-19 01:55:32 -0800198 // Use a slot for the first time.
199 CHECK_LT(mMemoryCache.size(), kMaxNumberOfSlots) << "Exceeded maximum number of slots!";
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800200 const int32_t slot = static_cast<int32_t>(mMemoryCache.size());
201 mMemoryCache.emplace_back();
Michael Butler76e491f2020-12-19 01:55:32 -0800202 mCacheCleaner.emplace_back();
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800203
204 return slot;
205}
206
Michael Butler76e491f2020-12-19 01:55:32 -0800207// ExecutionBurstCallback methods
208
// The callback keeps only a weak reference (kMemoryCache) so it does not keep the cache
// alive after the Burst is destroyed; the argument must be non-null at construction.
Burst::ExecutionBurstCallback::ExecutionBurstCallback(
        const std::shared_ptr<MemoryCache>& memoryCache)
    : kMemoryCache(memoryCache) {
    CHECK(memoryCache != nullptr);
}
214
Michael Butler137ee992021-11-01 16:40:31 -0700215Return<void> Burst::ExecutionBurstCallback::getMemories(const hidl_vec<int32_t>& slots,
216 getMemories_cb cb) {
Michael Butler76e491f2020-12-19 01:55:32 -0800217 const auto memoryCache = kMemoryCache.lock();
218 if (memoryCache == nullptr) {
Michael Butler137ee992021-11-01 16:40:31 -0700219 LOG(ERROR) << "Burst::ExecutionBurstCallback::getMemories called after the MemoryCache has "
220 "been freed";
Michael Butler76e491f2020-12-19 01:55:32 -0800221 cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
222 return Void();
223 }
224
225 const auto maybeMemories = getMemoriesHelper(slots, memoryCache);
226 if (!maybeMemories.has_value()) {
227 const auto& [message, code] = maybeMemories.error();
Michael Butler137ee992021-11-01 16:40:31 -0700228 LOG(ERROR) << "Burst::ExecutionBurstCallback::getMemories failed with " << code << ": "
229 << message;
Michael Butler76e491f2020-12-19 01:55:32 -0800230 cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
231 return Void();
232 }
233
234 cb(V1_0::ErrorStatus::NONE, maybeMemories.value());
235 return Void();
236}
237
Michael Butler137ee992021-11-01 16:40:31 -0700238// Burst methods
Michael Butler76e491f2020-12-19 01:55:32 -0800239
// Builds a burst controller: creates the request/result FMQs, registers the memory-slot
// callback, configures the burst on the remote prepared model, and arms a death handler
// that invalidates the FMQ endpoints if the remote context dies. The steps below are
// order-dependent (the burst context must exist before the death handler is created).
nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
        nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
        std::chrono::microseconds pollingTimeWindow) {
    // check inputs
    if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
        return NN_ERROR() << "Burst::create passed a nullptr";
    }

    // create FMQ objects
    auto [requestChannelSender, requestChannelDescriptor] =
            NN_TRY(RequestChannelSender::create(kExecutionBurstChannelLength));
    auto [resultChannelReceiver, resultChannelDescriptor] =
            NN_TRY(ResultChannelReceiver::create(kExecutionBurstChannelLength, pollingTimeWindow));

    // check FMQ objects
    CHECK(requestChannelSender != nullptr);
    CHECK(requestChannelDescriptor != nullptr);
    CHECK(resultChannelReceiver != nullptr);
    CHECK(resultChannelDescriptor != nullptr);

    // create memory cache
    auto memoryCache = std::make_shared<MemoryCache>();

    // create callback object the server will query for slot-to-memory resolution
    auto burstCallback = sp<ExecutionBurstCallback>::make(memoryCache);
    auto cb = hal::utils::CallbackValue(executionBurstResultCallback);

    // configure burst on the remote prepared model
    const Return<void> ret = hidlPreparedModel->configureExecutionBurst(
            burstCallback, *requestChannelDescriptor, *resultChannelDescriptor, cb);
    HANDLE_TRANSPORT_FAILURE(ret);

    auto burstContext = NN_TRY(cb.take());
    memoryCache->setBurstContext(burstContext);

    // create death handler object so FMQ endpoints error out if the service dies
    auto deathHandler = NN_TRY(neuralnetworks::utils::DeathHandler::create(burstContext));
    deathHandler.protectCallbackForLifetimeOfDeathHandler(requestChannelSender.get());
    deathHandler.protectCallbackForLifetimeOfDeathHandler(resultChannelReceiver.get());

    // make and return controller
    return std::make_shared<const Burst>(
            PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
            std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
            std::move(memoryCache), std::move(deathHandler));
}
286
// Use Burst::create instead; the tag parameter restricts construction to this file.
// All arguments are validated by create() before this constructor runs.
Burst::Burst(PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
             std::unique_ptr<RequestChannelSender> requestChannelSender,
             std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
             sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
             std::shared_ptr<MemoryCache> memoryCache,
             neuralnetworks::utils::DeathHandler deathHandler)
    : kPreparedModel(std::move(preparedModel)),
      mRequestChannelSender(std::move(requestChannelSender)),
      mResultChannelReceiver(std::move(resultChannelReceiver)),
      mBurstCallback(std::move(callback)),
      mBurstContext(std::move(burstContext)),
      mMemoryCache(std::move(memoryCache)),
      kDeathHandler(std::move(deathHandler)) {}
Michael Butlerf6b2d1a2020-12-19 14:44:35 -0800300
Michael Butler137ee992021-11-01 16:40:31 -0700301Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) const {
Michael Butler76e491f2020-12-19 01:55:32 -0800302 auto [slot, hold] = mMemoryCache->cacheMemory(memory);
303 return hold;
304}
305
// Executes `request` over the burst FMQ channel, falling back to the regular
// IPreparedModel path when the request cannot be represented or the packet cannot be
// sent. Returns the output shapes and timing information from the result channel.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalTimePoint& deadline,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    // This is the first point when we know an execution is occurring, so begin to collect
    // systraces. Note that the first point we can begin collecting systraces in
    // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
    // ExecutionBurstServer collects systraces at different points in the code.
    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::execute");

    // if the request is valid but of a higher version than what's supported in burst execution,
    // fall back to another execution path
    if (!compliantVersion(request).ok()) {
        // burst cannot represent this request; use the prepared model's execution path
        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
    }

    // ensure that request is ready for IPC (pointer-based arguments copied into shared memory)
    std::optional<nn::Request> maybeRequestInShared;
    hal::utils::RequestRelocation relocation;
    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
            &maybeRequestInShared, &relocation));

    // clear pools field of request, as they will be provided via slots
    const auto requestWithoutPools = nn::Request{
            .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
    const auto hidlMeasure = NN_TRY(convert(measure));

    // register each memory pool with the cache; the holds keep the slots alive for the
    // duration of the call
    std::vector<int32_t> slots;
    std::vector<OptionalCacheHold> holds;
    slots.reserve(requestInShared.pools.size());
    holds.reserve(requestInShared.pools.size());
    for (const auto& memoryPool : requestInShared.pools) {
        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
        slots.push_back(slot);
        holds.push_back(std::move(hold));
    }

    // serialize and send request packet; the fallback re-runs the execution through the
    // prepared model if the FMQ send fails
    const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
    const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
    };
    return executeInternal(requestPacket, relocation, fallback);
}
353
// See IBurst::createReusableExecution for information on this method.
// Pre-serializes the request into an FMQ packet and bundles it with the memory-cache
// holds so repeated compute() calls can reuse the same packet without re-conversion.
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution");

    // if the request is valid but of a higher version than what's supported in burst execution,
    // fall back to another execution path
    if (!compliantVersion(request).ok()) {
        // burst cannot represent this request; use the prepared model's execution path
        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
    }

    // ensure that request is ready for IPC (pointer-based arguments copied into shared memory)
    std::optional<nn::Request> maybeRequestInShared;
    hal::utils::RequestRelocation relocation;
    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
            &maybeRequestInShared, &relocation));

    // clear pools field of request, as they will be provided via slots
    const auto requestWithoutPools = nn::Request{
            .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
    const auto hidlMeasure = NN_TRY(convert(measure));

    // register each memory pool with the cache; the holds are transferred to the
    // BurstExecution so the slots stay valid for its whole lifetime
    std::vector<int32_t> slots;
    std::vector<OptionalCacheHold> holds;
    slots.reserve(requestInShared.pools.size());
    holds.reserve(requestInShared.pools.size());
    for (const auto& memoryPool : requestInShared.pools) {
        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
        slots.push_back(slot);
        holds.push_back(std::move(hold));
    }

    const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
    return BurstExecution::create(shared_from_this(), std::move(requestPacket),
                                  std::move(relocation), std::move(holds));
}
394
// Sends a pre-serialized request packet over the FMQ and blocks for the result packet.
// At most one execution may be in flight at a time; `fallback` (may be null) is invoked
// only if the packet cannot be sent, and `relocation` flushes pointer-backed arguments
// into/out of shared memory around the call.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
        const std::vector<FmqRequestDatum>& requestPacket,
        const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const {
    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "Burst::executeInternal");

    // Ensure that at most one execution is in flight at any given time.
    const bool alreadyInFlight = mExecutionInFlight.test_and_set();
    if (alreadyInFlight) {
        return NN_ERROR() << "IBurst already has an execution in flight";
    }
    // Clear the in-flight flag on every exit path, including errors and the fallback.
    const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); });

    // copy caller-pointer inputs into the shared-memory pool the server reads from
    if (relocation.input) {
        relocation.input->flush();
    }

    // send request packet
    const auto sendStatus = mRequestChannelSender->sendPacket(requestPacket);
    if (!sendStatus.ok()) {
        // fallback to another execution path if the packet could not be sent
        if (fallback) {
            return fallback();
        }
        return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
    }

    // get result packet (blocks, polling for up to the configured time window)
    const auto [status, outputShapes, timing] = NN_TRY(mResultChannelReceiver->getBlocking());

    // copy shared-memory outputs back to the caller's pointer-based arguments
    if (relocation.output) {
        relocation.output->flush();
    }
    return executionCallback(status, outputShapes, timing);
}
429
// Validates that the controlling Burst is non-null, then constructs the reusable
// execution object that owns the serialized packet, relocation, and cache holds.
nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
        std::shared_ptr<const Burst> controller, std::vector<FmqRequestDatum> request,
        hal::utils::RequestRelocation relocation,
        std::vector<Burst::OptionalCacheHold> cacheHolds) {
    if (controller == nullptr) {
        return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
    }

    return std::make_shared<const BurstExecution>(PrivateConstructorTag{}, std::move(controller),
                                                  std::move(request), std::move(relocation),
                                                  std::move(cacheHolds));
}
442
// Use BurstExecution::create instead; the tag restricts construction to this file.
// `cacheHolds` pins the request's memory slots for the lifetime of this object.
BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
                               std::shared_ptr<const Burst> controller,
                               std::vector<FmqRequestDatum> request,
                               hal::utils::RequestRelocation relocation,
                               std::vector<Burst::OptionalCacheHold> cacheHolds)
    : kController(std::move(controller)),
      kRequest(std::move(request)),
      kRelocation(std::move(relocation)),
      kCacheHolds(std::move(cacheHolds)) {}
452
// Replays the pre-serialized request on the controlling burst. No fallback is supplied:
// if the FMQ send fails the error is returned directly. The deadline parameter is
// unused here; deadline handling is not plumbed through the V1_2 burst path.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
        const nn::OptionalTimePoint& /*deadline*/) const {
    return kController->executeInternal(kRequest, kRelocation, /*fallback=*/nullptr);
}
457
// Fenced execution is not part of the burst protocol; callers must use a regular
// prepared-model execution for fenced compute. Always returns GENERAL_FAILURE.
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
                              const nn::OptionalTimePoint& /*deadline*/,
                              const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IExecution::computeFenced is not supported on burst object";
}
465
Michael Butler76e491f2020-12-19 01:55:32 -0800466} // namespace android::hardware::neuralnetworks::V1_2::utils