Clean up NN callback error handling
This CL introduces a new templated class CallbackValue to handle HIDL
"return value" callbacks in a terser and more readable way (both new
helpers are sketched below).
This CL also introduces a new macro, HANDLE_HAL_STATUS, which returns
from the enclosing function when an error status is present and lets
the caller append a more descriptive error message via the stream
operator.
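For reference, minimal sketches of both new helpers follow. Neither
definition is part of this diff (they are defined elsewhere, presumably
in the shared hal::utils headers included by these files), so the bodies
below are assumptions inferred from the call sites in this CL.

    // Sketch of CallbackValue (assumed, not the actual implementation):
    // a functor that runs a conversion function when invoked as a HIDL
    // "return value" callback and hands the converted result back to the
    // caller afterwards.
    #include <functional>
    #include <memory>
    #include <optional>
    #include <utility>

    template <typename ReturnType, typename... ArgTypes>
    class CallbackValue final {
      public:
        using FunctionType = std::function<ReturnType(ArgTypes...)>;

        explicit CallbackValue(FunctionType fn)
            : mFunction(std::move(fn)),
              mValue(std::make_shared<std::optional<ReturnType>>()) {}

        // Invoked by the HIDL method as its "return value" callback.
        void operator()(ArgTypes... args) { mValue->emplace(mFunction(args...)); }

        // Retrieves the converted result after the synchronous HIDL call
        // returns; only valid if the callback was actually invoked.
        ReturnType take() { return std::move(mValue->value()); }

      private:
        FunctionType mFunction;
        // Held behind a shared_ptr in this sketch so the result survives
        // even if the HIDL binding copies the functor into a std::function.
        std::shared_ptr<std::optional<ReturnType>> mValue;
    };

    // Deduction guide so call sites can write CallbackValue(freeFunction)
    // and have the template arguments deduced from the signature.
    template <typename ReturnType, typename... ArgTypes>
    CallbackValue(ReturnType (*)(ArgTypes...)) -> CallbackValue<ReturnType, ArgTypes...>;

which makes the call sites in this CL read as:

    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
    const auto ret = device->getCapabilities_1_3(cb);
    HANDLE_TRANSPORT_FAILURE(ret);
    return cb.take();

HANDLE_HAL_STATUS can be sketched as an if/else whose empty "success"
branch makes the trailing stream expression at the call site bind to the
NN_ERROR returned from the enclosing function; the if-initializer keeps
the status evaluated once and scopes the canonical error:

    // Sketch only; the real macro may differ in detail.
    #define HANDLE_HAL_STATUS(status)                                       \
        if (const auto canonical = ::android::nn::convert(status).value_or( \
                    ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
            canonical == ::android::nn::ErrorStatus::NONE) {                \
        } else                                                              \
            return NN_ERROR(canonical)

so that, for example,

    HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status);

expands to a statement that returns early from the enclosing function
with the canonical error and the appended message whenever the HAL
status is not ErrorStatus::NONE.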
Finally, this CL changes the behavior of synchronous executions. Prior
to this CL, IPreparedModel fell back to an asynchronous execution when
synchronous execution was allowed but failed. With this change, if
synchronous execution is allowed and fails, that failure is returned
directly instead of retrying asynchronously.
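In pseudocode terms (argument lists elided as "..."), the control flow
in PreparedModel::execute changes roughly as follows:

    // Before: run the allowed synchronous path, then retry
    // asynchronously on any failure.
    auto result = executeSynchronously(...);
    if (!result.has_value()) {
        result = executeAsynchronously(...);
    }

    // After: choose exactly one path up front; a failure of that path
    // is returned to the caller instead of triggering the other path.
    auto result = kExecuteSynchronously
            ? executeSynchronously(...)
            : executeAsynchronously(...);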
Bug: 173084343
Test: mma
Change-Id: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707
Merged-In: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707
(cherry picked from commit 98ed9baf5de85599847b2b2f53585243c3b7b776)
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
index cb2a56a..643172e 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
@@ -39,6 +39,26 @@
namespace android::hardware::neuralnetworks::V1_3::utils {
+// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
+// this function returns with the supported operations as indicated by a driver. On failure, this
+// function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+ ErrorStatus status, const hidl_vec<bool>& supportedOperations);
+
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_R. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+ ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IPreparedModel::execute* to the NN canonical format. On success, this
+// function returns with the output shapes and the timing information. On failure, this function
+// returns with the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+ ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
class PreparedModelCallback final : public IPreparedModelCallback,
public hal::utils::IProtectedCallback {
public:
@@ -55,11 +75,10 @@
Data get();
private:
- void notifyInternal(Data result);
-
hal::utils::TransferValue<Data> mData;
};
+// A HIDL callback class to receive the results of IPreparedModel::execute_1_3 asynchronously.
class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
public:
using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -76,8 +95,6 @@
Data get();
private:
- void notifyInternal(Data result);
-
hal::utils::TransferValue<Data> mData;
};
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
index 477bb7b..74a6534 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
@@ -103,6 +103,17 @@
nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory);
nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+ const nn::ExecutionPreference& executionPreference);
+nn::GeneralResult<hidl_vec<V1_2::Extension>> convert(const std::vector<nn::Extension>& extensions);
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
+nn::GeneralResult<hidl_vec<V1_2::OutputShape>> convert(
+ const std::vector<nn::OutputShape>& outputShapes);
+nn::GeneralResult<V1_2::DeviceType> convert(const nn::DeviceType& deviceType);
+nn::GeneralResult<V1_2::MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
+nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing);
+
} // namespace android::hardware::neuralnetworks::V1_3::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index c4ba483..664d87a 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -40,10 +40,10 @@
public:
static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
- sp<V1_3::IPreparedModel> preparedModel);
+ sp<V1_3::IPreparedModel> preparedModel, bool executeSynchronously);
- PreparedModel(PrivateConstructorTag tag, sp<V1_3::IPreparedModel> preparedModel,
- hal::utils::DeathHandler deathHandler);
+ PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
+ sp<V1_3::IPreparedModel> preparedModel, hal::utils::DeathHandler deathHandler);
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
@@ -66,6 +66,7 @@
const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const;
+ const bool kExecuteSynchronously;
const sp<V1_3::IPreparedModel> kPreparedModel;
const hal::utils::DeathHandler kDeathHandler;
};
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
index 4ef54a2..614033e 100644
--- a/neuralnetworks/1.3/utils/src/Buffer.cpp
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -41,12 +41,10 @@
nn::GeneralResult<std::shared_ptr<const Buffer>> Buffer::create(
sp<V1_3::IBuffer> buffer, nn::Request::MemoryDomainToken token) {
if (buffer == nullptr) {
- return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
- << "V1_3::utils::Buffer::create must have non-null buffer";
+ return NN_ERROR() << "V1_3::utils::Buffer::create must have non-null buffer";
}
if (token == static_cast<nn::Request::MemoryDomainToken>(0)) {
- return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
- << "V1_3::utils::Buffer::create must have non-zero token";
+ return NN_ERROR() << "V1_3::utils::Buffer::create must have non-zero token";
}
return std::make_shared<const Buffer>(PrivateConstructorTag{}, std::move(buffer), token);
@@ -68,10 +66,7 @@
const auto ret = kBuffer->copyTo(hidlDst);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status);
- }
+ HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status);
return {};
}
@@ -83,10 +78,7 @@
const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status);
- }
+ HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status);
return {};
}
diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp
index 17c20fb..af76e6a 100644
--- a/neuralnetworks/1.3/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp
@@ -30,6 +30,7 @@
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/1.2/Callbacks.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/PreparedModel.h>
#include <nnapi/hal/CommonUtils.h>
@@ -45,136 +46,93 @@
namespace android::hardware::neuralnetworks::V1_3::utils {
namespace {
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
- const sp<V1_0::IPreparedModel>& preparedModel) {
- return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
- const sp<V1_2::IPreparedModel>& preparedModel) {
- return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
- const sp<IPreparedModel>& preparedModel) {
- return NN_TRY(utils::PreparedModel::create(preparedModel));
-}
-
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
const V1_2::Timing& timing) {
return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
}
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionGeneralResults(const hidl_vec<V1_2::OutputShape>& outputShapes,
- const V1_2::Timing& timing) {
+} // namespace
+
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+ ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+ HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+ return supportedOperations;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+ ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+ HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+ return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+ ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ auto canonicalOutputShapes =
+ nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
+ return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+ << "execution failed with " << toString(status);
+ }
+ HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
return hal::utils::makeExecutionFailure(
convertExecutionGeneralResultsHelper(outputShapes, timing));
}
-} // namespace
-
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
const sp<V1_0::IPreparedModel>& preparedModel) {
- if (status != V1_0::ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
- } else if (preparedModel == nullptr) {
- notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "Returned preparedModel is nullptr");
- } else {
- notifyInternal(convertPreparedModel(preparedModel));
- }
+ mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
return Void();
}
Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
const sp<V1_2::IPreparedModel>& preparedModel) {
- if (status != V1_0::ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
- } else if (preparedModel == nullptr) {
- notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "Returned preparedModel is nullptr");
- } else {
- notifyInternal(convertPreparedModel(preparedModel));
- }
+ mData.put(V1_2::utils::prepareModelCallback(status, preparedModel));
return Void();
}
Return<void> PreparedModelCallback::notify_1_3(ErrorStatus status,
const sp<IPreparedModel>& preparedModel) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
- } else if (preparedModel == nullptr) {
- notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "Returned preparedModel is nullptr");
- } else {
- notifyInternal(convertPreparedModel(preparedModel));
- }
+ mData.put(prepareModelCallback(status, preparedModel));
return Void();
}
void PreparedModelCallback::notifyAsDeadObject() {
- notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+ mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
PreparedModelCallback::Data PreparedModelCallback::get() {
return mData.take();
}
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
- mData.put(std::move(result));
-}
-
// ExecutionCallback methods begin here
Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
- if (status != V1_0::ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
- } else {
- notifyInternal({});
- }
+ mData.put(V1_0::utils::executionCallback(status));
return Void();
}
Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
const hidl_vec<V1_2::OutputShape>& outputShapes,
const V1_2::Timing& timing) {
- if (status != V1_0::ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
- } else {
- notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
- }
+ mData.put(V1_2::utils::executionCallback(status, outputShapes, timing));
return Void();
}
Return<void> ExecutionCallback::notify_1_3(ErrorStatus status,
const hidl_vec<V1_2::OutputShape>& outputShapes,
const V1_2::Timing& timing) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
- } else {
- notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
- }
+ mData.put(executionCallback(status, outputShapes, timing));
return Void();
}
void ExecutionCallback::notifyAsDeadObject() {
- notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+ mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
ExecutionCallback::Data ExecutionCallback::get() {
return mData.take();
}
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
- mData.put(std::move(result));
-}
-
} // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index c89a69f..8b7db2b 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -685,4 +685,38 @@
return validatedConvert(bufferRoles);
}
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+ return V1_2::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+ const nn::ExecutionPreference& executionPreference) {
+ return V1_2::utils::convert(executionPreference);
+}
+
+nn::GeneralResult<hidl_vec<V1_2::Extension>> convert(const std::vector<nn::Extension>& extensions) {
+ return V1_2::utils::convert(extensions);
+}
+
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles) {
+ return V1_2::utils::convert(handles);
+}
+
+nn::GeneralResult<hidl_vec<V1_2::OutputShape>> convert(
+ const std::vector<nn::OutputShape>& outputShapes) {
+ return V1_2::utils::convert(outputShapes);
+}
+
+nn::GeneralResult<V1_2::DeviceType> convert(const nn::DeviceType& deviceType) {
+ return V1_2::utils::convert(deviceType);
+}
+
+nn::GeneralResult<V1_2::MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
+ return V1_2::utils::convert(measureTiming);
+}
+
+nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing) {
+ return V1_2::utils::convert(timing);
+}
+
} // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index 6056498..d710b85 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -36,6 +36,7 @@
#include <nnapi/hal/1.1/Conversions.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/Device.h>
+#include <nnapi/hal/1.2/Utils.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
@@ -69,29 +70,27 @@
return hidlPreparedModels;
}
-nn::GeneralResult<nn::SharedBuffer> convert(
- nn::GeneralResult<std::shared_ptr<const Buffer>> result) {
- return NN_TRY(std::move(result));
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
+ const Capabilities& capabilities) {
+ HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+ return nn::convert(capabilities);
}
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_3::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_3::IDevice* device) {
CHECK(device != nullptr);
- nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "uninitialized";
- const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- result = NN_ERROR(canonical) << "getCapabilities_1_3 failed with " << toString(status);
- } else {
- result = nn::convert(capabilities);
- }
- };
+ auto cb = hal::utils::CallbackValue(capabilitiesCallback);
const auto ret = device->getCapabilities_1_3(cb);
HANDLE_TRANSPORT_FAILURE(ret);
- return result;
+ return cb.take();
+}
+
+nn::GeneralResult<nn::SharedBuffer> allocationCallback(ErrorStatus status,
+ const sp<IBuffer>& buffer, uint32_t token) {
+ HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status);
+ return Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token));
}
} // namespace
@@ -107,12 +106,12 @@
<< "V1_3::utils::Device::create must have non-null device";
}
- auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get()));
- const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get()));
- auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get()));
- auto capabilities = NN_TRY(initCapabilities(device.get()));
+ auto versionString = NN_TRY(V1_2::utils::getVersionStringFrom(device.get()));
+ const auto deviceType = NN_TRY(V1_2::utils::getDeviceTypeFrom(device.get()));
+ auto extensions = NN_TRY(V1_2::utils::getSupportedExtensionsFrom(device.get()));
+ auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
const auto numberOfCacheFilesNeeded =
- NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get()));
+ NN_TRY(V1_2::utils::getNumberOfCacheFilesNeededFrom(device.get()));
auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
return std::make_shared<const Device>(
@@ -177,27 +176,12 @@
const auto hidlModel = NN_TRY(convert(modelInShared));
- nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "uninitialized";
- auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- result = NN_ERROR(canonical)
- << "IDevice::getSupportedOperations_1_3 failed with " << toString(status);
- } else if (supportedOperations.size() != model.main.operations.size()) {
- result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "IDevice::getSupportedOperations_1_3 returned vector of size "
- << supportedOperations.size() << " but expected "
- << model.main.operations.size();
- } else {
- result = supportedOperations;
- }
- };
+ auto cb = hal::utils::CallbackValue(supportedOperationsCallback);
const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
HANDLE_TRANSPORT_FAILURE(ret);
- return result;
+ return cb.take();
}
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -210,12 +194,12 @@
NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
const auto hidlModel = NN_TRY(convert(modelInShared));
- const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+ const auto hidlPreference = NN_TRY(convert(preference));
const auto hidlPriority = NN_TRY(convert(priority));
const auto hidlDeadline = NN_TRY(convert(deadline));
- const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
- const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
- const auto hidlToken = token;
+ const auto hidlModelCache = NN_TRY(convert(modelCache));
+ const auto hidlDataCache = NN_TRY(convert(dataCache));
+ const auto hidlToken = V1_2::utils::CacheToken{token};
const auto cb = sp<PreparedModelCallback>::make();
const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -224,10 +208,7 @@
kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
hidlModelCache, hidlDataCache, hidlToken, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
- }
+ HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
return cb->get();
}
@@ -236,9 +217,9 @@
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto hidlDeadline = NN_TRY(convert(deadline));
- const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
- const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
- const auto hidlToken = token;
+ const auto hidlModelCache = NN_TRY(convert(modelCache));
+ const auto hidlDataCache = NN_TRY(convert(dataCache));
+ const auto hidlToken = V1_2::utils::CacheToken{token};
const auto cb = sp<PreparedModelCallback>::make();
const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -246,10 +227,7 @@
const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
hidlToken, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
- }
+ HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
return cb->get();
}
@@ -263,27 +241,13 @@
const auto hidlInputRoles = NN_TRY(convert(inputRoles));
const auto hidlOutputRoles = NN_TRY(convert(outputRoles));
- nn::GeneralResult<nn::SharedBuffer> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
- << "uninitialized";
- auto cb = [&result](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status);
- } else if (buffer == nullptr) {
- result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr";
- } else if (token == 0) {
- result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)";
- } else {
- result = convert(
- Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token)));
- }
- };
+ auto cb = hal::utils::CallbackValue(allocationCallback);
const auto ret =
kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb);
HANDLE_TRANSPORT_FAILURE(ret);
- return result;
+ return cb.take();
}
} // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 0bae95d..7b4b7ba 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -45,25 +45,17 @@
namespace android::hardware::neuralnetworks::V1_3::utils {
namespace {
-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
- const V1_2::Timing& timing) {
- return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
- const hidl_vec<V1_2::OutputShape>& outputShapes, const V1_2::Timing& timing) {
- return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
- const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+ ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+ HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
}
-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-convertExecuteFencedResults(const hidl_handle& syncFence,
- const sp<IFencedExecutionCallback>& callback) {
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fencedExecutionCallback(
+ ErrorStatus status, const hidl_handle& syncFence,
+ const sp<IFencedExecutionCallback>& callback) {
+ HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status);
+
auto resultSyncFence = nn::SyncFence::createAsSignaled();
if (syncFence.getNativeHandle() != nullptr) {
auto sharedHandle = NN_TRY(nn::convert(syncFence));
@@ -78,23 +70,12 @@
// Create callback which can be used to retrieve the execution error status and timings.
nn::ExecuteFencedInfoCallback resultCallback =
[callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
- nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> result =
- NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
- auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched,
- const V1_2::Timing& timingFenced) {
- if (status != ErrorStatus::NONE) {
- const auto canonical =
- nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status);
- } else {
- result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced);
- }
- };
+ auto cb = hal::utils::CallbackValue(convertFencedExecutionCallbackResults);
const auto ret = callback->getExecutionInfo(cb);
HANDLE_TRANSPORT_FAILURE(ret);
- return result;
+ return cb.take();
};
return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
@@ -103,42 +84,34 @@
} // namespace
nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
- sp<V1_3::IPreparedModel> preparedModel) {
+ sp<V1_3::IPreparedModel> preparedModel, bool executeSynchronously) {
if (preparedModel == nullptr) {
- return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
- << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
+ return NN_ERROR() << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
}
auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
- return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
- std::move(deathHandler));
+ return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+ std::move(preparedModel), std::move(deathHandler));
}
-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_3::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+ sp<V1_3::IPreparedModel> preparedModel,
hal::utils::DeathHandler deathHandler)
- : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+ : kExecuteSynchronously(executeSynchronously),
+ kPreparedModel(std::move(preparedModel)),
+ kDeathHandler(std::move(deathHandler)) {}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
const OptionalTimePoint& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
- nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
- NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
- const auto cb = [&result](ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
- const V1_2::Timing& timing) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
- } else {
- result = convertExecutionResults(outputShapes, timing);
- }
- };
+ auto cb = hal::utils::CallbackValue(executionCallback);
const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
loopTimeoutDuration, cb);
HANDLE_TRANSPORT_FAILURE(ret);
- return result;
+ return cb.take();
}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -151,9 +124,8 @@
const auto ret =
kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
+ if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
}
return cb->get();
@@ -169,35 +141,22 @@
hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
- const auto hidlMeasure =
- NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure)));
+ const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
const auto hidlLoopTimeoutDuration =
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
- nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
- NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
- const bool preferSynchronous = true;
+ auto result = kExecuteSynchronously
+ ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+ hidlLoopTimeoutDuration)
+ : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+ hidlLoopTimeoutDuration);
+ auto [outputShapes, timing] = NN_TRY(std::move(result));
- // Execute synchronously if allowed.
- if (preferSynchronous) {
- result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
- hidlLoopTimeoutDuration);
- }
+ NN_TRY(hal::utils::makeExecutionFailure(
+ hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
- // Run asymchronous execution if execution has not already completed.
- if (!result.has_value()) {
- result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
- hidlLoopTimeoutDuration);
- }
-
- // Flush output buffers if suxcessful execution.
- if (result.has_value()) {
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
- }
-
- return result;
+ return std::make_pair(std::move(outputShapes), timing);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -212,28 +171,18 @@
const auto hidlRequest = NN_TRY(convert(requestInShared));
const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
- const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure));
+ const auto hidlMeasure = NN_TRY(convert(measure));
const auto hidlDeadline = NN_TRY(convert(deadline));
const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
- nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> result =
- NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
- auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence,
- const sp<IFencedExecutionCallback>& callback) {
- if (status != ErrorStatus::NONE) {
- const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
- result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status);
- } else {
- result = convertExecuteFencedResults(syncFence, callback);
- }
- };
+ auto cb = hal::utils::CallbackValue(fencedExecutionCallback);
const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
hidlDeadline, hidlLoopTimeoutDuration,
hidlTimeoutDurationAfterFence, cb);
HANDLE_TRANSPORT_FAILURE(ret);
- auto [syncFence, callback] = NN_TRY(std::move(result));
+ auto [syncFence, callback] = NN_TRY(cb.take());
// If executeFenced required the request memory to be moved into shared memory, block here until
// the fenced execution has completed and flush the memory back.