/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "PreparedModel.h"

#include "Callbacks.h"
#include "Conversions.h"
#include "ProtectCallback.h"
#include "Utils.h"

#include <android/binder_auto_utils.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Burst.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::utils {
namespace {

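// Converts the output shapes and timing reported over AIDL into their canonical NN counterparts.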
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
        const std::vector<OutputShape>& outputShapes, const Timing& timing) {
    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
}

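// Converts the status and launch/fence timings reported by a fenced execution callback, mapping a
// failing status to the corresponding error.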
nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionResults(
        ErrorStatus status, const aidl_hal::Timing& timingLaunched,
        const aidl_hal::Timing& timingFenced) {
    HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
    return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
}

}  // namespace

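// Validates that the provided AIDL prepared model handle is non-null before wrapping it in this
// adapter.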
nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
        std::shared_ptr<aidl_hal::IPreparedModel> preparedModel) {
    if (preparedModel == nullptr) {
        return NN_ERROR()
               << "aidl_hal::utils::PreparedModel::create must have non-null preparedModel";
    }

    return std::make_shared<const PreparedModel>(PrivateConstructorTag{},
                                                 std::move(preparedModel));
}

PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/,
                             std::shared_ptr<aidl_hal::IPreparedModel> preparedModel)
    : kPreparedModel(std::move(preparedModel)) {}

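// Performs a synchronous execution. Pointer-backed request memory is first staged in shared
// memory for IPC and copied back into the caller's buffers after the execution completes.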
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalTimePoint& deadline,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    // Ensure that request is ready for IPC.
    std::optional<nn::Request> maybeRequestInShared;
    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));

    const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
    const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
    const auto aidlLoopTimeoutDuration =
            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));

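    // Issue the synchronous execution over AIDL, mapping any binder transport or service error to
    // an execution failure.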
    ExecutionResult executionResult;
    const auto ret = kPreparedModel->executeSynchronously(
            aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
    HANDLE_ASTATUS(ret) << "executeSynchronously failed";
    if (!executionResult.outputSufficientSize) {
        auto canonicalOutputShapes =
                nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
               << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
    }
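    // Convert the reported output shapes and timing to canonical types, then copy any outputs that
    // were staged in shared memory back into the caller's pointer-backed buffers.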
    auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
            convertExecutionResults(executionResult.outputShapes, executionResult.timing)));

    NN_TRY(hal::utils::makeExecutionFailure(
            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));

    return std::make_pair(std::move(outputShapes), timing);
}

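// Launches a fenced execution. On success, returns a sync fence that is signaled when the
// execution completes and a callback for retrieving its error status and timings.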
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
                             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
                             const nn::OptionalDuration& loopTimeoutDuration,
                             const nn::OptionalDuration& timeoutDurationAfterFence) const {
    // Ensure that request is ready for IPC.
    std::optional<nn::Request> maybeRequestInShared;
    const nn::Request& requestInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));

    const auto aidlRequest = NN_TRY(convert(requestInShared));
    const auto aidlWaitFor = NN_TRY(convert(waitFor));
    const auto aidlMeasure = NN_TRY(convert(measure));
    const auto aidlDeadline = NN_TRY(convert(deadline));
    const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
    const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));

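    // Issue the fenced execution over AIDL; on success, the service populates the result with a
    // sync fence and an info callback.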
    FencedExecutionResult result;
    const auto ret = kPreparedModel->executeFenced(aidlRequest, aidlWaitFor, aidlMeasure,
                                                   aidlDeadline, aidlLoopTimeoutDuration,
                                                   aidlTimeoutDurationAfterFence, &result);
    HANDLE_ASTATUS(ret) << "executeFenced failed";

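    // A sync fence file descriptor of -1 means the service did not return a fence, so treat the
    // execution as already complete by using an already-signaled fence.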
    auto resultSyncFence = nn::SyncFence::createAsSignaled();
    if (result.syncFence.get() != -1) {
        resultSyncFence = NN_TRY(nn::convert(result.syncFence));
    }

    auto callback = result.callback;
    if (callback == nullptr) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
    }

    // If executeFenced required the request memory to be moved into shared memory, block here
    // until the fenced execution has completed and flush the memory back.
    if (maybeRequestInShared.has_value()) {
        const auto state = resultSyncFence.syncWait({});
        if (state != nn::SyncFence::FenceState::SIGNALED) {
            return NN_ERROR() << "syncWait failed with " << state;
        }
        NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
    }

    // Create callback which can be used to retrieve the execution error status and timings.
    nn::ExecuteFencedInfoCallback resultCallback =
            [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
        ErrorStatus errorStatus;
        Timing timingLaunched;
        Timing timingFenced;
        const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
        HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
        return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
    };

    return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
}

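// Burst executions are provided by the shared 1.0 utils Burst adapter, which implements the
// canonical burst interface on top of this prepared model.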
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
    return hal::V1_0::utils::Burst::create(shared_from_this());
}

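// Exposes the underlying AIDL IPreparedModel handle as a type-erased std::any resource.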
std::any PreparedModel::getUnderlyingResource() const {
    std::shared_ptr<aidl_hal::IPreparedModel> resource = kPreparedModel;
    return resource;
}

}  // namespace aidl::android::hardware::neuralnetworks::utils