/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "Device.h"
18
19#include "Buffer.h"
20#include "Callbacks.h"
21#include "Conversions.h"
22#include "PreparedModel.h"
23#include "Utils.h"
24
25#include <android/hardware/neuralnetworks/1.0/types.h>
26#include <android/hardware/neuralnetworks/1.1/types.h>
27#include <android/hardware/neuralnetworks/1.2/types.h>
28#include <android/hardware/neuralnetworks/1.3/IDevice.h>
29#include <android/hardware/neuralnetworks/1.3/types.h>
30#include <nnapi/IBuffer.h>
31#include <nnapi/IDevice.h>
32#include <nnapi/IPreparedModel.h>
33#include <nnapi/OperandTypes.h>
34#include <nnapi/Result.h>
35#include <nnapi/Types.h>
36#include <nnapi/hal/1.1/Conversions.h>
37#include <nnapi/hal/1.2/Conversions.h>
38#include <nnapi/hal/1.2/Device.h>
39#include <nnapi/hal/CommonUtils.h>
40#include <nnapi/hal/HandleError.h>
41#include <nnapi/hal/ProtectCallback.h>
42
43#include <any>
44#include <functional>
45#include <memory>
46#include <optional>
47#include <string>
48#include <vector>
49
50namespace android::hardware::neuralnetworks::V1_3::utils {
51namespace {
52
53nn::GeneralResult<hidl_vec<sp<IPreparedModel>>> convert(
54 const std::vector<nn::SharedPreparedModel>& preparedModels) {
55 hidl_vec<sp<IPreparedModel>> hidlPreparedModels(preparedModels.size());
56 for (size_t i = 0; i < preparedModels.size(); ++i) {
57 std::any underlyingResource = preparedModels[i]->getUnderlyingResource();
58 if (const auto* hidlPreparedModel =
59 std::any_cast<sp<IPreparedModel>>(&underlyingResource)) {
60 hidlPreparedModels[i] = *hidlPreparedModel;
61 } else {
62 return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
63 << "Unable to convert from nn::IPreparedModel to V1_3::IPreparedModel";
64 }
65 }
66 return hidlPreparedModels;
67}
68
69nn::GeneralResult<nn::SharedBuffer> convert(
70 nn::GeneralResult<std::shared_ptr<const Buffer>> result) {
71 return NN_TRY(std::move(result));
72}
73
74} // namespace
75
76nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
77 sp<V1_3::IDevice> device) {
78 if (name.empty()) {
79 return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
80 << "V1_3::utils::Device::create must have non-empty name";
81 }
82 if (device == nullptr) {
83 return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
84 << "V1_3::utils::Device::create must have non-null device";
85 }
86
87 auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get()));
88 const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get()));
89 auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get()));
90 auto capabilities = NN_TRY(V1_2::utils::initCapabilities(device.get()));
91 const auto numberOfCacheFilesNeeded =
92 NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get()));
93
94 auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
95 return std::make_shared<const Device>(
96 PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType,
97 std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
98 std::move(device), std::move(deathHandler));
99}
100
// Member-wise initialization only; all validation and remote queries happen
// in Device::create. PrivateConstructorTag restricts direct construction so
// that create() is effectively the sole entry point.
Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString,
               nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
               nn::Capabilities capabilities,
               std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, sp<V1_3::IDevice> device,
               hal::utils::DeathHandler deathHandler)
    : kName(std::move(name)),
      kVersionString(std::move(versionString)),
      kDeviceType(deviceType),
      kExtensions(std::move(extensions)),
      kCapabilities(std::move(capabilities)),
      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded),
      kDevice(std::move(device)),
      kDeathHandler(std::move(deathHandler)) {}
114
115const std::string& Device::getName() const {
116 return kName;
117}
118
119const std::string& Device::getVersionString() const {
120 return kVersionString;
121}
122
123nn::Version Device::getFeatureLevel() const {
124 return nn::Version::ANDROID_R;
125}
126
127nn::DeviceType Device::getType() const {
128 return kDeviceType;
129}
130
131const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
132 return kExtensions;
133}
134
135const nn::Capabilities& Device::getCapabilities() const {
136 return kCapabilities;
137}
138
139std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
140 return kNumberOfCacheFilesNeeded;
141}
142
143nn::GeneralResult<void> Device::wait() const {
144 const auto ret = kDevice->ping();
145 return hal::utils::handleTransportError(ret);
146}
147
// Asks the driver which operations of `model` it can execute. On success,
// returns one bool per operation in model.main.operations, in order.
nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));

    // `result` starts as an error so that a driver that never invokes the
    // callback still yields a failure rather than an empty success.
    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                  << "uninitialized";
    // Captures by reference: the HIDL callback runs synchronously within the
    // getSupportedOperations_1_3 call below, before `result` is read.
    // The size check uses the original `model`; flushDataFromPointerToShared
    // is assumed to only relocate memory, not change the operation count.
    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "IDevice::getSupportedOperations_1_3 failed with " << toString(status);
        } else if (supportedOperations.size() != model.main.operations.size()) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "IDevice::getSupportedOperations_1_3 returned vector of size "
                     << supportedOperations.size() << " but expected "
                     << model.main.operations.size();
        } else {
            result = supportedOperations;
        }
    };

    const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}
179
// Compiles `model` on the driver via prepareModel_1_3 and returns the
// resulting prepared model. Conversion failures, transport errors, and
// driver-reported errors are all surfaced as GeneralResult errors.
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    // Convert each canonical argument to its HIDL counterpart before the call.
    const auto hidlModel = NN_TRY(convert(modelInShared));
    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
    const auto hidlPriority = NN_TRY(convert(priority));
    const auto hidlDeadline = NN_TRY(convert(deadline));
    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
    const auto hidlToken = token;

    // `scoped` keeps the callback protected by the death handler for the
    // duration of this call, so it is unblocked if the service dies.
    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret =
            kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
                                      hidlModelCache, hidlDataCache, hidlToken, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
    }

    // Preparation completes asynchronously; cb->get() presumably blocks until
    // the driver notifies the callback — see PreparedModelCallback.
    return cb->get();
}
212
// Recreates a prepared model from previously written cache files via
// prepareModelFromCache_1_3, avoiding a full recompilation. Error handling
// mirrors prepareModel above.
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    // Convert canonical arguments to their HIDL counterparts.
    const auto hidlDeadline = NN_TRY(convert(deadline));
    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
    const auto hidlToken = token;

    // Protect the callback so it is unblocked if the service dies mid-call.
    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
                                                        hidlToken, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
    }

    // cb->get() presumably blocks until the driver notifies the callback.
    return cb->get();
}
235
// Allocates a driver-managed buffer (memory domain) usable with the given
// prepared models in the given input/output roles. Returns a canonical
// buffer wrapping the driver's IBuffer and its memory-domain token.
nn::GeneralResult<nn::SharedBuffer> Device::allocate(
        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
        const std::vector<nn::BufferRole>& inputRoles,
        const std::vector<nn::BufferRole>& outputRoles) const {
    // Convert canonical arguments to their HIDL counterparts; the prepared
    // models are unwrapped back to V1_3::IPreparedModel handles.
    const auto hidlDesc = NN_TRY(convert(desc));
    const auto hidlPreparedModels = NN_TRY(convert(preparedModels));
    const auto hidlInputRoles = NN_TRY(convert(inputRoles));
    const auto hidlOutputRoles = NN_TRY(convert(outputRoles));

    // Starts as an error so a driver that never invokes the callback still
    // yields a failure. The callback runs synchronously within the
    // allocate() call below, before `result` is read.
    nn::GeneralResult<nn::SharedBuffer> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                 << "uninitialized";
    auto cb = [&result](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status);
        } else if (buffer == nullptr) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr";
        } else if (token == 0) {
            // 0 is rejected as a token value here; nonzero tokens are
            // assumed valid — see the memory-domain token contract.
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)";
        } else {
            result = convert(
                    Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token)));
        }
    };

    const auto ret =
            kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}
268
269} // namespace android::hardware::neuralnetworks::V1_3::utils