Merge "Port mode configuration test - II"
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
index b6f9433..91a9555 100644
--- a/neuralnetworks/1.0/IDevice.hal
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-/* This HAL is a work in progress */
-
package android.hardware.neuralnetworks@1.0;
import IEvent;
@@ -28,7 +26,10 @@
/**
* Gets the capabilities of a driver.
*
- * @return status ErrorStatus::NONE if successful.
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if the driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
* @return capabilities Capabilities of the driver.
*/
getCapabilities() generates (ErrorStatus status, Capabilities capabilities);
@@ -43,7 +44,11 @@
*
* @param model A model whose operations--and their corresponding
* operands--are to be verified by the driver.
- * @return status ErrorStatus::NONE if successful.
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if the driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT when the provided model is invalid
* @return supportedOperations A list of supported operations, where true
* indicates the operation is supported and
* false indicates the operation is not
@@ -52,7 +57,7 @@
* it is describing.
*/
getSupportedOperations(Model model)
- generates (ErrorStatus status, vec<bool> supportedOperations);
+ generates (ErrorStatus status, vec<bool> supportedOperations);
/**
* Prepares a model for execution.
@@ -60,7 +65,7 @@
* prepareModel is used to make any necessary transformations or alternative
* representations to a model for execution, possibly including
* transformations on the constant data, optimization on the model's graph,
- * or compilation into the device's native binary.
+ * or compilation into the device's native binary format.
*
* The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time.
@@ -68,16 +73,25 @@
* @param model The model to be prepared for execution.
* @param event A synchronization callback that must be signaled once the
* execution has finished.
- * @return status ErrorStatus::NONE if successful.
+ * @return status Error status of the call, must be:
+ * - NONE if the preparation task is successfully launched
+ * - DEVICE_UNAVAILABLE if the driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT when one of the input arguments is
+ * invalid
* @return preparedModel A handle to the resultant prepared model.
*/
prepareModel(Model model, IEvent event)
- generates (ErrorStatus status, IPreparedModel preparedModel);
+ generates (ErrorStatus status, IPreparedModel preparedModel);
/**
* Returns the current status of a driver.
*
- * @return status Status of the driver.
+ * @return status Status of the driver, one of:
+ * - DeviceStatus::AVAILABLE
+ * - DeviceStatus::BUSY
+ * - DeviceStatus::OFFLINE
+ * - DeviceStatus::UNKNOWN
*/
getStatus() generates (DeviceStatus status);
};
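
Note: the expanded @return documentation above is what clients are expected to branch on. A minimal sketch of a caller, assuming only the generated HIDL bindings for this package (queryCapabilities itself is a hypothetical helper, not part of this change):

    #include <android/hardware/neuralnetworks/1.0/IDevice.h>

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::neuralnetworks::V1_0::Capabilities;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_0::IDevice;

    // Returns true only if the transport succeeded and the driver reported NONE.
    bool queryCapabilities(const sp<IDevice>& device, Capabilities* out) {
        ErrorStatus error = ErrorStatus::GENERAL_FAILURE;
        Return<void> ret = device->getCapabilities(
            [&](ErrorStatus status, const Capabilities& capabilities) {
                // Documented statuses: NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE.
                error = status;
                if (status == ErrorStatus::NONE) {
                    *out = capabilities;
                }
            });
        return ret.isOk() && error == ErrorStatus::NONE;
    }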
diff --git a/neuralnetworks/1.0/IEvent.hal b/neuralnetworks/1.0/IEvent.hal
index 2ebda58..2fe454c 100644
--- a/neuralnetworks/1.0/IEvent.hal
+++ b/neuralnetworks/1.0/IEvent.hal
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-/* This HAL is a work in progress */
-
package android.hardware.neuralnetworks@1.0;
/**
@@ -37,7 +35,11 @@
* the work) to mark the event as completed so that any threads requiring
* the corresponding output can continue executing.
*
- * @param status ErrorStatus::NONE if successful.
+ * @param status Error status returned from the asynchronous task, must be:
+ * - NONE if the asynchronous task was successful
+ * - DEVICE_UNAVAILABLE if the driver is offline or busy
+ * - GENERAL_FAILURE if the asynchronous task resulted in an
+ * unspecified error
*/
oneway notify(ErrorStatus status);
};
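
Note: the notify() contract above is what a driver signals from the worker that runs the deferred task. A sketch of the producer side, where runTask() is an illustrative placeholder rather than anything defined by this change:

    // Producer side: run the deferred work, then mark the event as completed
    // so threads blocked on it (e.g. Event::wait in the VTS tests) resume.
    void launchTask(const sp<IEvent>& event) {
        bool ok = runTask();  // hypothetical worker, not part of this change
        event->notify(ok ? ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE);
    }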
diff --git a/neuralnetworks/1.0/IPreparedModel.hal b/neuralnetworks/1.0/IPreparedModel.hal
index a7c3342..5df883e 100644
--- a/neuralnetworks/1.0/IPreparedModel.hal
+++ b/neuralnetworks/1.0/IPreparedModel.hal
@@ -36,11 +36,16 @@
*
* @param request The input and output information on which the prepared
* model is to be executed.
- * prepared model.
* @param event A callback used for synchronization that must be signaled
* once the execution has finished.
- * @return status ErrorStatus::NONE if the asynchronous task was
- * successfully launched.
+ * @return status Error status of the call, must be:
+ * - NONE if the task is successfully launched
+ * - DEVICE_UNAVAILABLE if the driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if the provided output buffer is
+ * not large enough to store the resultant values
+ * - INVALID_ARGUMENT when one of the input arguments is
+ * invalid
*/
execute(Request request, IEvent event) generates (ErrorStatus status);
};
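
Note: execute() reports the documented statuses synchronously for the launch only; completion (and any late failure) arrives through the IEvent. A sketch of a caller, using the same names the VTS tests below use:

    // Launch: the returned ErrorStatus covers argument validation and launch.
    Return<ErrorStatus> launch = preparedModel->execute(request, executionEvent);
    if (!launch.isOk() || static_cast<ErrorStatus>(launch) != ErrorStatus::NONE) {
        return;  // e.g. INVALID_ARGUMENT or OUTPUT_INSUFFICIENT_SIZE
    }
    // Completion: block until the driver calls IEvent::notify().
    Event::Status done = executionEvent->wait();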
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 39e3d34..537331b 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -218,7 +218,7 @@
*
* Inputs:
* 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
* 2: A 1-D tensor, of shape [depth_out], specifying the bias.
* For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
@@ -1105,14 +1105,16 @@
/**
* Quantized scale of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
*/
float scale;
/**
* Quantized zero-point offset of the operand.
*
- * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
*/
int32_t zeroPoint;
@@ -1195,12 +1197,18 @@
/**
* A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
*/
vec<uint8_t> operandValues;
/**
* A collection of shared memory pools containing operand data that were
* registered by the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
*/
vec<memory> pools;
};
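
Note: the new "if and only if" wording pins each constant operand to exactly one home. A sketch of the two cases in the designated-initializer style of the test models below (the 16-byte length, i.e. four floats, is illustrative):

    // CONSTANT_COPY: bytes live inline in Model::operandValues;
    // location.offset/length index into that buffer, poolIndex is ignored.
    Operand inlineConstant = {
        .lifetime = OperandLifeTime::CONSTANT_COPY,
        .location = {.poolIndex = 0, .offset = 0, .length = 16},
    };

    // CONSTANT_REFERENCE: bytes live in a shared memory region;
    // location.poolIndex selects an entry of Model::pools.
    Operand pooledConstant = {
        .lifetime = OperandLifeTime::CONSTANT_REFERENCE,
        .location = {.poolIndex = 1, .offset = 0, .length = 16},
    };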
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 2318430..89e1021 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -19,6 +19,7 @@
srcs: [
"Event.cpp",
"GeneratedTestHarness.cpp",
+ "Models.cpp",
"VtsHalNeuralnetworksV1_0TargetTest.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index db90ac2..4b8daec 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -42,6 +42,24 @@
using ::generated_tests::Float32Operands;
using ::generated_tests::Int32Operands;
using ::generated_tests::Quant8Operands;
+using ::generated_tests::compare;
+
+template <typename ty>
+void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
+ MixedTyped& test = *dst;
+ for_each<ty>(test, [&ra, src](int index, std::vector<ty>& m) {
+ ASSERT_EQ(m.size(), ra[index].location.length / sizeof(ty));
+ char* begin = src + ra[index].location.offset;
+ memcpy(m.data(), begin, ra[index].location.length);
+ });
+}
+
+void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
+ copy_back_<float>(dst, ra, src);
+ copy_back_<int32_t>(dst, ra, src);
+ copy_back_<uint8_t>(dst, ra, src);
+}
+
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
@@ -97,9 +115,7 @@
MixedTyped test; // holding test results
// Go through all outputs, initialize RequestArgument descriptors
- resize_accordingly<float>(golden, test);
- resize_accordingly<int32_t>(golden, test);
- resize_accordingly<uint8_t>(golden, test);
+ resize_accordingly(golden, test);
for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
@@ -156,40 +172,16 @@
// validate results
outputMemory->read();
-#define COPY_BACK(ty) \
- for_each<ty>(test, [&outputs_info, outputPtr](int index, std::vector<ty>& m) { \
- RequestArgument& i = outputs_info[index]; \
- ASSERT_EQ(m.size(), i.location.length / sizeof(ty)); \
- char* begin = outputPtr + i.location.offset; \
- memcpy(m.data(), begin, i.location.length); \
- });
- COPY_BACK(float);
- COPY_BACK(int32_t);
- COPY_BACK(uint8_t);
-#undef COPY_BACK
+ copy_back(&test, outputs_info, outputPtr);
outputMemory->commit();
// Filter out don't cares
MixedTyped filtered_golden;
MixedTyped filtered_test;
- filter<float>(golden, &filtered_golden, is_ignored);
- filter<float>(test, &filtered_test, is_ignored);
- filter<int32_t>(golden, &filtered_golden, is_ignored);
- filter<int32_t>(test, &filtered_test, is_ignored);
- filter<uint8_t>(golden, &filtered_golden, is_ignored);
- filter<uint8_t>(test, &filtered_test, is_ignored);
+ filter(golden, &filtered_golden, is_ignored);
+ filter(test, &filtered_test, is_ignored);
// We want "close-enough" results for float
- for_each<float>(filtered_golden, [&filtered_test](int index, auto& golden_float) {
- auto& test_float_operands = std::get<Float32Operands>(filtered_test);
- auto& test_float = test_float_operands[index];
- for (unsigned int i = 0; i < golden_float.size(); i++) {
- SCOPED_TRACE(i);
- EXPECT_NEAR(golden_float[i], test_float[i], 1.e-5);
- }
- });
- EXPECT_EQ(std::get<Int32Operands>(filtered_golden), std::get<Int32Operands>(filtered_test));
- EXPECT_EQ(std::get<Quant8Operands>(filtered_golden),
- std::get<Quant8Operands>(filtered_test));
+ compare(filtered_golden, filtered_test);
}
}
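
Note: the per-type EXPECT blocks removed here move behind the compare() helper imported above. A sketch of what that helper consolidates, reconstructed from the deleted code (the actual implementation lives in TestHarness and may differ):

    void compare(MixedTyped& golden, MixedTyped& test) {
        // "close-enough" comparison for float outputs
        for_each<float>(golden, [&test](int index, std::vector<float>& golden_float) {
            auto& test_float = std::get<Float32Operands>(test)[index];
            for (unsigned int i = 0; i < golden_float.size(); i++) {
                SCOPED_TRACE(i);
                EXPECT_NEAR(golden_float[i], test_float[i], 1.e-5);
            }
        });
        // exact comparison for the integer-typed outputs
        EXPECT_EQ(std::get<Int32Operands>(golden), std::get<Int32Operands>(test));
        EXPECT_EQ(std::get<Quant8Operands>(golden), std::get<Quant8Operands>(test));
    }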
diff --git a/neuralnetworks/1.0/vts/functional/Models.cpp b/neuralnetworks/1.0/vts/functional/Models.cpp
new file mode 100644
index 0000000..9802f62
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Models.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "Models.h"
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+#include <vector>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+// create a valid model
+Model createValidTestModel() {
+ const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
+ const uint32_t size = operand2Data.size() * sizeof(float);
+
+ const uint32_t operand1 = 0;
+ const uint32_t operand2 = 1;
+ const uint32_t operand3 = 2;
+ const uint32_t operand4 = 3;
+
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1, 2, 2, 1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1, 2, 2, 1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = size},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1, 2, 2, 1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {{
+ .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
+ .inputs = {operand1, operand2, operand3},
+ .outputs = {operand4},
+ }};
+
+ const std::vector<uint32_t> inputIndexes = {operand1};
+ const std::vector<uint32_t> outputIndexes = {operand4};
+ std::vector<uint8_t> operandValues(
+ reinterpret_cast<const uint8_t*>(operand2Data.data()),
+ reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
+ int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
+ operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
+ reinterpret_cast<const uint8_t*>(&activation[1]));
+
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+// create first invalid model
+Model createInvalidTestModel1() {
+ Model model = createValidTestModel();
+ model.operations[0].opTuple = {static_cast<OperationType>(0xDEADBEEF) /* INVALID */,
+ OperandType::TENSOR_FLOAT32};
+ return model;
+}
+
+// create second invalid model
+Model createInvalidTestModel2() {
+ Model model = createValidTestModel();
+ const uint32_t operand1 = 0;
+ const uint32_t operand5 = 4; // INVALID OPERAND
+ model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
+ return model;
+}
+
+// allocator helper
+hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
+ hidl_memory memory;
+
+ sp<IAllocator> allocator = IAllocator::getService(type);
+ if (!allocator.get()) {
+ return {};
+ }
+
+ Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
+ ASSERT_TRUE(success);
+ memory = mem;
+ });
+ if (!ret.isOk()) {
+ return {};
+ }
+
+ return memory;
+}
+
+// create a valid request
+Request createValidTestRequest() {
+ std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
+ std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
+ const uint32_t INPUT = 0;
+ const uint32_t OUTPUT = 1;
+
+ // prepare inputs
+ uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
+ uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
+ std::vector<RequestArgument> inputs = {{
+ .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
+ }};
+ std::vector<RequestArgument> outputs = {{
+ .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
+ }};
+ std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
+ allocateSharedMemory(outputSize)};
+ if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
+ return {};
+ }
+
+ // load data
+ sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+ sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+ if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
+ return {};
+ }
+ float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
+ float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
+ if (inputPtr == nullptr || outputPtr == nullptr) {
+ return {};
+ }
+ inputMemory->update();
+ outputMemory->update();
+ std::copy(inputData.begin(), inputData.end(), inputPtr);
+ std::copy(outputData.begin(), outputData.end(), outputPtr);
+ inputMemory->commit();
+ outputMemory->commit();
+
+ return {.inputs = inputs, .outputs = outputs, .pools = pools};
+}
+
+// create first invalid request
+Request createInvalidTestRequest1() {
+ Request request = createValidTestRequest();
+ const uint32_t INVALID = 2;
+ std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
+ uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
+ request.inputs[0].location = {
+ .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
+ return request;
+}
+
+// create second invalid request
+Request createInvalidTestRequest2() {
+ Request request = createValidTestRequest();
+ request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
+ return request;
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
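
Note: taken together, the valid artifacts encode one elementwise ADD with fused activation NONE: input {1, 2, 3, 4} plus the CONSTANT_COPY tensor {5, 6, 7, 8} yields {6, 8, 10, 12}. A usage sketch matching the tests that follow:

    Model model = createValidTestModel();        // 1x2x2x1 float ADD graph
    Request request = createValidTestRequest();  // input {1,2,3,4}; output pool preset to -1s
    // After a successful execute(), the output pool holds {6, 8, 10, 12}.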
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h
new file mode 100644
index 0000000..e0d57d5
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Models.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworksV1_0TargetTest.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+// create the model
+Model createValidTestModel();
+Model createInvalidTestModel1();
+Model createInvalidTestModel2();
+
+// create the request
+Request createValidTestRequest();
+Request createInvalidTestRequest1();
+Request createInvalidTestRequest2();
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 453e3e5..59d66ba 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -18,6 +18,7 @@
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include "Event.h"
+#include "Models.h"
#include "TestHarness.h"
#include <android-base/logging.h>
@@ -65,6 +66,32 @@
void NeuralnetworksHidlTest::TearDown() {}
+sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut(const Model& model) {
+ sp<IPreparedModel> preparedModel;
+ ErrorStatus prepareStatus;
+ sp<Event> preparationEvent = new Event();
+ if (preparationEvent.get() == nullptr) {
+ return nullptr;
+ }
+
+ Return<void> prepareRet = device->prepareModel(
+ model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+ prepareStatus = status;
+ preparedModel = prepared;
+ });
+
+ if (!prepareRet.isOk() || prepareStatus != ErrorStatus::NONE ||
+ preparedModel.get() == nullptr) {
+ return nullptr;
+ }
+ Event::Status eventStatus = preparationEvent->wait();
+ if (eventStatus != Event::Status::SUCCESS) {
+ return nullptr;
+ }
+
+ return preparedModel;
+}
+
// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -91,107 +118,9 @@
EXPECT_TRUE(ret.isOk());
}
-namespace {
-// create the model
-Model createTestModel() {
- const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
- const uint32_t size = operand2Data.size() * sizeof(float);
-
- const uint32_t operand1 = 0;
- const uint32_t operand2 = 1;
- const uint32_t operand3 = 2;
- const uint32_t operand4 = 3;
-
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = size},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- };
-
- const std::vector<Operation> operations = {{
- .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
- .inputs = {operand1, operand2, operand3},
- .outputs = {operand4},
- }};
-
- const std::vector<uint32_t> inputIndexes = {operand1};
- const std::vector<uint32_t> outputIndexes = {operand4};
- std::vector<uint8_t> operandValues(
- reinterpret_cast<const uint8_t*>(operand2Data.data()),
- reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
- int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
- operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
- reinterpret_cast<const uint8_t*>(&activation[1]));
-
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-} // anonymous namespace
-
-// allocator helper
-hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
- hidl_memory memory;
-
- sp<IAllocator> allocator = IAllocator::getService(type);
- if (!allocator.get()) {
- return {};
- }
-
- Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
- ASSERT_TRUE(success);
- memory = mem;
- });
- if (!ret.isOk()) {
- return {};
- }
-
- return memory;
-}
-
-// supported subgraph test
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsTest) {
- Model model = createTestModel();
+// supported operations positive test
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
+ Model model = createValidTestModel();
Return<void> ret = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::NONE, status);
@@ -200,76 +129,126 @@
EXPECT_TRUE(ret.isOk());
}
-// execute simple graph
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
- std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
- std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
- std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
- const uint32_t INPUT = 0;
- const uint32_t OUTPUT = 1;
+// supported operations negative test 1
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
+ Model model = createInvalidTestModel1();
+ Return<void> ret = device->getSupportedOperations(
+ model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ (void)supported;
+ });
+ EXPECT_TRUE(ret.isOk());
+}
- // prepare request
- Model model = createTestModel();
- sp<IPreparedModel> preparedModel;
+// supported operations negative test 2
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
+ Model model = createInvalidTestModel2();
+ Return<void> ret = device->getSupportedOperations(
+ model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ (void)supported;
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+
+// prepare simple model positive test
+TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
+ Model model = createValidTestModel();
sp<Event> preparationEvent = new Event();
ASSERT_NE(nullptr, preparationEvent.get());
Return<void> prepareRet = device->prepareModel(
model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
EXPECT_EQ(ErrorStatus::NONE, status);
- preparedModel = prepared;
+ (void)prepared;
});
ASSERT_TRUE(prepareRet.isOk());
+}
+
+// prepare simple model negative test 1
+TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
+ Model model = createInvalidTestModel1();
+ sp<Event> preparationEvent = new Event();
+ ASSERT_NE(nullptr, preparationEvent.get());
+ Return<void> prepareRet = device->prepareModel(
+ model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ (void)prepared;
+ });
+ ASSERT_TRUE(prepareRet.isOk());
+}
+
+// prepare simple model negative test 2
+TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
+ Model model = createInvalidTestModel2();
+ sp<Event> preparationEvent = new Event();
+ ASSERT_NE(nullptr, preparationEvent.get());
+ Return<void> prepareRet = device->prepareModel(
+ model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ (void)prepared;
+ });
+ ASSERT_TRUE(prepareRet.isOk());
+}
+
+// execute simple graph positive test
+TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
+ Model model = createValidTestModel();
+ sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
ASSERT_NE(nullptr, preparedModel.get());
- Event::Status preparationStatus = preparationEvent->wait();
- EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
+ Request request = createValidTestRequest();
- // prepare inputs
- uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
- uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
- std::vector<RequestArgument> inputs = {{
- .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
- }};
- std::vector<RequestArgument> outputs = {{
- .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
- }};
- std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
- allocateSharedMemory(outputSize)};
- ASSERT_NE(0ull, pools[INPUT].size());
- ASSERT_NE(0ull, pools[OUTPUT].size());
-
- // load data
- sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
- sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
- ASSERT_NE(nullptr, inputMemory.get());
- ASSERT_NE(nullptr, outputMemory.get());
- float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
- float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
- ASSERT_NE(nullptr, inputPtr);
- ASSERT_NE(nullptr, outputPtr);
- inputMemory->update();
- outputMemory->update();
- std::copy(inputData.begin(), inputData.end(), inputPtr);
- std::copy(outputData.begin(), outputData.end(), outputPtr);
- inputMemory->commit();
- outputMemory->commit();
-
- // execute request
sp<Event> executionEvent = new Event();
ASSERT_NE(nullptr, executionEvent.get());
- Return<ErrorStatus> executeStatus = preparedModel->execute(
- {.inputs = inputs, .outputs = outputs, .pools = pools}, executionEvent);
+ Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
ASSERT_TRUE(executeStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
Event::Status eventStatus = executionEvent->wait();
EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
- // validate results { 1+5, 2+6, 3+7, 4+8 }
+ std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
+ std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
+ const uint32_t OUTPUT = 1;
+
+ sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
+ ASSERT_NE(nullptr, outputMemory.get());
+ float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
+ ASSERT_NE(nullptr, outputPtr);
outputMemory->read();
std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
outputMemory->commit();
EXPECT_EQ(expectedData, outputData);
}
+// execute simple graph negative test 1
+TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
+ Model model = createValidTestModel();
+ sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
+ ASSERT_NE(nullptr, preparedModel.get());
+ Request request = createInvalidTestRequest1();
+
+ sp<Event> executionEvent = new Event();
+ ASSERT_NE(nullptr, executionEvent.get());
+ Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
+ ASSERT_TRUE(executeStatus.isOk());
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
+ executionEvent->wait();
+}
+
+// execute simple graph negative test 2
+TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
+ Model model = createValidTestModel();
+ sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
+ ASSERT_NE(nullptr, preparedModel.get());
+ Request request = createInvalidTestRequest2();
+
+ sp<Event> executionEvent = new Event();
+ ASSERT_NE(nullptr, executionEvent.get());
+ Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
+ ASSERT_TRUE(executeStatus.isOk());
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
+ executionEvent->wait();
+}
+
// Mixed-typed examples
typedef MixedTypedExampleType MixedTypedExample;
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
index 9c56e6a..1b3b334 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
@@ -72,6 +72,8 @@
void SetUp() override;
void TearDown() override;
+ sp<IPreparedModel> doPrepareModelShortcut(const Model& model);
+
sp<IDevice> device;
};