Neuralnetworks HAL cleanup -- hardware/interfaces
Does three primary cleanups:
1) Removes unused OperandTypes, creates OEM types
2) Creates explicit ErrorStatus; functions return status
3) IDevice::initialize renamed to getCapabilities,
IDevice::getSupportedSubgraph renamed to getSupportedOperations
Additionally fixes the corresponding VTS tests.
Bug: 63905942
Test: mm, vts
Change-Id: Ib4c61b9b13963ac2367f21dc3c20e5946eb955a9
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
index 9e19097..ec3b27f 100644
--- a/neuralnetworks/1.0/IDevice.hal
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -22,11 +22,13 @@
import IPreparedModel;
interface IDevice {
- initialize() generates(Capabilities capabilities);
+ getCapabilities() generates (ErrorStatus status, Capabilities capabilities);
- getSupportedSubgraph(Model model) generates(vec<bool> supported);
+ getSupportedOperations(Model model)
+ generates (ErrorStatus status, vec<bool> supportedOperations);
- prepareModel(Model model, IEvent event) generates(IPreparedModel preparedModel);
+ prepareModel(Model model, IEvent event)
+ generates (ErrorStatus status, IPreparedModel preparedModel);
- getStatus() generates(DeviceStatus status);
+ getStatus() generates (DeviceStatus status);
};
diff --git a/neuralnetworks/1.0/IEvent.hal b/neuralnetworks/1.0/IEvent.hal
index 63afeaf..cf71bbc 100644
--- a/neuralnetworks/1.0/IEvent.hal
+++ b/neuralnetworks/1.0/IEvent.hal
@@ -44,6 +44,6 @@
* @param status Status of the execution associated with the Event.
- * Should be SUCCESS or ERROR.
+ * Should be ErrorStatus::NONE on success, or an appropriate ErrorStatus error code on failure.
*/
- oneway notify(Status status);
+ oneway notify(ErrorStatus status);
};
diff --git a/neuralnetworks/1.0/IPreparedModel.hal b/neuralnetworks/1.0/IPreparedModel.hal
index 428ddc7..1b82610 100644
--- a/neuralnetworks/1.0/IPreparedModel.hal
+++ b/neuralnetworks/1.0/IPreparedModel.hal
@@ -22,5 +22,5 @@
interface IPreparedModel {
// Multiple threads can call this execute function concurrently.
- execute(Request request, IEvent event) generates(bool success);
+ execute(Request request, IEvent event) generates (ErrorStatus status);
};
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 844c44c..870c067 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -22,18 +22,14 @@
// These values are the same as found in the NeuralNetworks.h file.
// When modifying, be sure to update HAL_NUM_OPERAND_TYPES in HalIntefaces.h.
enum OperandType : uint32_t {
- FLOAT16 = 0,
+ OEM = 0,
FLOAT32 = 1,
- INT8 = 2,
- UINT8 = 3,
- INT16 = 4,
- UINT16 = 5,
- INT32 = 6,
- UINT32 = 7,
- TENSOR_FLOAT16 = 8,
- TENSOR_FLOAT32 = 9,
- TENSOR_INT32 = 10,
- TENSOR_QUANT8_ASYMM = 11,
+ INT32 = 2, // TODO(b/63905942): determine whether scalar INT32 is still needed
+ UINT32 = 3,
+ TENSOR_OEM_BYTE = 4,
+ TENSOR_FLOAT32 = 5,
+ TENSOR_INT32 = 6,
+ TENSOR_QUANT8_ASYMM = 7,
};
// The type of operations. Unlike the operation types found in
@@ -210,7 +206,10 @@
vec<memory> pools;
};
-enum Status : uint32_t {
- SUCCESS,
- ERROR,
+enum ErrorStatus : uint32_t {
+ NONE,
+ DEVICE_UNAVAILABLE,
+ GENERAL_FAILURE,
+ OUTPUT_INSUFFICIENT_SIZE,
+ INVALID_ARGUMENT,
};
diff --git a/neuralnetworks/1.0/vts/functional/Event.cpp b/neuralnetworks/1.0/vts/functional/Event.cpp
index 67de4f5..efaacb3 100644
--- a/neuralnetworks/1.0/vts/functional/Event.cpp
+++ b/neuralnetworks/1.0/vts/functional/Event.cpp
@@ -21,10 +21,10 @@
// thread::join failed: Resource deadlock would occur
}
-Return<void> Event::notify(ReturnedStatus status) {
+Return<void> Event::notify(ErrorStatus status) {
{
std::lock_guard<std::mutex> lock(mMutex);
- mStatus = status == ReturnedStatus::SUCCESS ? Status::SUCCESS : Status::ERROR;
+ mStatus = status == ErrorStatus::NONE ? Status::SUCCESS : Status::ERROR;
if (mStatus == Status::SUCCESS && mCallback != nullptr) {
bool success = mCallback();
if (!success) {
diff --git a/neuralnetworks/1.0/vts/functional/Event.h b/neuralnetworks/1.0/vts/functional/Event.h
index 4f7f2f6..7dd4070 100644
--- a/neuralnetworks/1.0/vts/functional/Event.h
+++ b/neuralnetworks/1.0/vts/functional/Event.h
@@ -24,8 +24,6 @@
using ::android::hardware::Void;
using ::android::sp;
-using ReturnedStatus = ::android::hardware::neuralnetworks::V1_0::Status;
-
/**
* The Event class is used internally by the Neuralnetworks runtime to
* synchronize between different threads. An asynchronous task is launched
@@ -77,9 +75,9 @@
*
* IEvent::notify can be called at most once on a given event.
*
- * @param neuralnetworks::V1_0::Status SUCCESS or ERROR
+ * @param neuralnetworks::V1_0::ErrorStatus ErrorStatus::NONE on success
*/
- Return<void> notify(ReturnedStatus status) override;
+ Return<void> notify(ErrorStatus status) override;
/**
* Event::poll returns the current status of the event.
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 9c64c04..cd8a527 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -64,24 +64,27 @@
// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
- DeviceStatus status = device->getStatus();
- EXPECT_EQ(DeviceStatus::AVAILABLE, status);
+ Return<DeviceStatus> status = device->getStatus();
+ ASSERT_TRUE(status.isOk());
+ EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
// initialization
-TEST_F(NeuralnetworksHidlTest, InitializeTest) {
- Return<void> ret = device->initialize([](const Capabilities& capabilities) {
- EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
- EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
- EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
- EXPECT_LT(0.0f, capabilities.bootupTime);
- EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
- EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
- EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
- EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
- EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
- EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
- });
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ Return<void> ret =
+ device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
+ EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
+ EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
+ EXPECT_LT(0.0f, capabilities.bootupTime);
+ EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
+ EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
+ EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+ EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+ EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+ EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+ });
EXPECT_TRUE(ret.isOk());
}
@@ -192,13 +195,14 @@
} // anonymous namespace
// supported subgraph test
-TEST_F(NeuralnetworksHidlTest, SupportedSubgraphTest) {
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsTest) {
Model model = createTestModel();
- std::vector<bool> supported;
- Return<void> ret = device->getSupportedSubgraph(
- model, [&](const hidl_vec<bool>& hidl_supported) { supported = hidl_supported; });
- ASSERT_TRUE(ret.isOk());
- EXPECT_EQ(/*model.operations.size()*/ 0ull, supported.size());
+ Return<void> ret = device->getSupportedOperations(
+ model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_EQ(model.operations.size(), supported.size());
+ });
+ EXPECT_TRUE(ret.isOk());
}
// execute simple graph
@@ -211,9 +215,15 @@
// prepare request
Model model = createTestModel();
+ sp<IPreparedModel> preparedModel;
sp<Event> preparationEvent = new Event();
ASSERT_NE(nullptr, preparationEvent.get());
- sp<IPreparedModel> preparedModel = device->prepareModel(model, preparationEvent);
+ Return<void> prepareRet = device->prepareModel(
+ model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ preparedModel = prepared;
+ });
+ ASSERT_TRUE(prepareRet.isOk());
ASSERT_NE(nullptr, preparedModel.get());
Event::Status preparationStatus = preparationEvent->wait();
EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
@@ -251,11 +261,12 @@
// execute request
sp<Event> executionEvent = new Event();
ASSERT_NE(nullptr, executionEvent.get());
- bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools},
- executionEvent);
- EXPECT_TRUE(success);
- Event::Status executionStatus = executionEvent->wait();
- EXPECT_EQ(Event::Status::SUCCESS, executionStatus);
+ Return<ErrorStatus> executeStatus = preparedModel->execute(
+ {.inputs = inputs, .outputs = outputs, .pools = pools}, executionEvent);
+ ASSERT_TRUE(executeStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
+ Event::Status eventStatus = executionEvent->wait();
+ EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
// validate results { 1+5, 2+6, 3+7, 4+8 }
outputMemory->read();