Refactor NNAPI VTS to remove unreasonable dependencies between versions
To make it easier to create the next version of NNAPI, this change
removes the following nonsensical dependencies:
- NNAPI 1.0 VTS depends on NNAPI 1.1 and 1.2
- NNAPI 1.1 VTS depends on NNAPI 1.2
In particular, I made the following changes:
- split GeneratedTestHarness.cpp into three separate implementations,
- created a restricted version of Callbacks.h for 1.0 and 1.1,
- removed the dependency on frameworks/ml/nn/HalInterfaces.h,
- refactored Android.bp files for more autonomy between 1.0, 1.1, and 1.2,
- consolidated some common code into Utils.h,
- created a structure for sharing code between VTS versions (VtsHalNeuralNetworksV1_0_utils); see the sketch after this list.
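
The sketch below is illustrative only and is not part of this patch. It shows
the intended shape of a 1.0-only code path after the split: a test links just
the 1.0 HAL plus VtsHalNeuralNetworksV1_0_utils and prepares a model through
the restricted callback. The helper name prepareModelExample and the use of
gtest macros outside a test body are assumptions, not code from this change.

    #include <android/hardware/neuralnetworks/1.0/IDevice.h>
    #include <gtest/gtest.h>
    #include "1.0/Callbacks.h"

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_0::IDevice;
    using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
    using ::android::hardware::neuralnetworks::V1_0::Model;
    using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

    // Prepare a model using only 1.0 interfaces and the restricted 1.0 callback.
    static sp<IPreparedModel> prepareModelExample(const sp<IDevice>& device, const Model& model) {
        sp<PreparedModelCallback> callback = new PreparedModelCallback();
        Return<ErrorStatus> launchStatus = device->prepareModel(model, callback);
        EXPECT_TRUE(launchStatus.isOk());
        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(launchStatus));
        callback->wait();  // blocks until the driver calls notify()
        EXPECT_EQ(ErrorStatus::NONE, callback->getStatus());
        return callback->getPreparedModel();
    }
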
Bug: 74827824
Bug: 124462414
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_1CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_1TargetTest
Change-Id: I4243d0b5e574255cef1070850f4d0a284f65f54e
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 0fb18f1..0d70816 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -15,21 +15,19 @@
//
cc_library_static {
- name: "VtsHalNeuralnetworksTest_utils",
+ name: "VtsHalNeuralNetworksV1_0_utils",
srcs: [
"Callbacks.cpp",
- "GeneratedTestHarness.cpp",
+ "Utils.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
- export_include_dirs: ["."],
+ export_include_dirs: ["include"],
shared_libs: [
"libfmq",
"libnativewindow",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
- "android.hardware.neuralnetworks@1.1",
- "android.hardware.neuralnetworks@1.2",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
@@ -44,12 +42,13 @@
}
cc_defaults {
- name: "VtsHalNeuralNetworksTargetTestDefaults",
+ name: "VtsHalNeuralNetworksV1_0TargetTestDefaults",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"ValidateModel.cpp",
"ValidateRequest.cpp",
"VtsHalNeuralnetworks.cpp",
+ "GeneratedTestHarness.cpp",
],
shared_libs: [
"libfmq",
@@ -57,14 +56,12 @@
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
- "android.hardware.neuralnetworks@1.1",
- "android.hardware.neuralnetworks@1.2",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_utils",
- "VtsHalNeuralnetworksTest_utils",
+ "VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
@@ -76,19 +73,19 @@
cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_0TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
- "GeneratedTests.cpp",
+ "GeneratedTestsV1_0.cpp",
],
}
cc_test {
name: "PresubmitHalNeuralnetworksV1_0TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_0TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
- "GeneratedTests.cpp",
+ "GeneratedTestsV1_0.cpp",
],
cflags: [
"-DPRESUBMIT_NOT_VTS",
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.cpp b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
index c30702c..2b5723d 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
@@ -14,13 +14,13 @@
* limitations under the License.
*/
-#include "Callbacks.h"
+#include "1.0/Callbacks.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_2 {
+namespace V1_0 {
namespace implementation {
CallbackBase::CallbackBase() : mNotified(false) {}
@@ -111,14 +111,6 @@
return Void();
}
-Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
- const sp<V1_2::IPreparedModel>& preparedModel) {
- mErrorStatus = errorStatus;
- mPreparedModel = preparedModel;
- CallbackBase::notify();
- return Void();
-}
-
ErrorStatus PreparedModelCallback::getStatus() {
wait();
return mErrorStatus;
@@ -135,18 +127,6 @@
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
mErrorStatus = errorStatus;
- mOutputShapes = {};
- mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
- CallbackBase::notify();
- return Void();
-}
-
-Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
- const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- mErrorStatus = errorStatus;
- mOutputShapes = outputShapes;
- mTiming = timing;
CallbackBase::notify();
return Void();
}
@@ -156,18 +136,8 @@
return mErrorStatus;
}
-const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
- wait();
- return mOutputShapes;
-}
-
-Timing ExecutionCallback::getTiming() {
- wait();
- return mTiming;
-}
-
} // namespace implementation
-} // namespace V1_2
+} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
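
For reference, the restricted 1.0 ExecutionCallback keeps only the blocking
wait()/getStatus() pair; the 1.2-only getOutputShapes() and getTiming()
accessors removed above no longer exist at this level. A minimal usage sketch,
assuming a prepared model and a Request are already in scope as they are in
the generated-test harness:

    // Sketch only; preparedModel and request come from the surrounding harness.
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    Return<ErrorStatus> launchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(launchStatus.isOk());
    executionCallback->wait();
    ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
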
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index c819b52..603054d 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -15,129 +15,47 @@
*/
#include "GeneratedTestHarness.h"
-#include "Callbacks.h"
-#include "ExecutionBurstController.h"
+#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
+#include "MemoryUtils.h"
#include "TestHarness.h"
-#include "Utils.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
-#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
-#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
-#include <android/hardware/neuralnetworks/1.1/IDevice.h>
-#include <android/hardware/neuralnetworks/1.2/IDevice.h>
-#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
-#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
-#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+
#include <iostream>
namespace android {
namespace hardware {
namespace neuralnetworks {
-
namespace generated_tests {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
-using ::test_helper::bool8;
+
+using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::IDevice;
+using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_0::Model;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
-using ::test_helper::expectMultinomialDistributionWithinTolerance;
using ::test_helper::filter;
using ::test_helper::for_all;
-using ::test_helper::for_each;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
-using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
-
-template <typename T>
-void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
- char* src) {
- for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
- ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
- char* begin = src + ra[index].location.offset;
- memcpy(m.data(), begin, ra[index].location.length);
- });
-}
-
-void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
- copy_back_(&dst->float32Operands, ra, src);
- copy_back_(&dst->int32Operands, ra, src);
- copy_back_(&dst->quant8AsymmOperands, ra, src);
- copy_back_(&dst->quant16SymmOperands, ra, src);
- copy_back_(&dst->float16Operands, ra, src);
- copy_back_(&dst->bool8Operands, ra, src);
- copy_back_(&dst->quant8ChannelOperands, ra, src);
- copy_back_(&dst->quant16AsymmOperands, ra, src);
- copy_back_(&dst->quant8SymmOperands, ra, src);
- static_assert(9 == MixedTyped::kNumTypes,
- "Number of types in MixedTyped changed, but copy_back function wasn't updated");
-}
-
-static bool isZeroSized(const MixedTyped& example, uint32_t index) {
- for (auto i : example.operandDimensions.at(index)) {
- if (i == 0) return true;
- }
- return false;
-}
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
-static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
- const Request& request, MeasureTiming,
- sp<ExecutionCallback>& callback) {
- return preparedModel->execute(request, callback);
-}
-static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- const Request& request, MeasureTiming measure,
- sp<ExecutionCallback>& callback) {
- return preparedModel->execute_1_2(request, measure, callback);
-}
-static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&,
- MeasureTiming, hidl_vec<OutputShape>*, Timing*) {
- ADD_FAILURE() << "asking for synchronous execution at V1_0";
- return ErrorStatus::GENERAL_FAILURE;
-}
-static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- const Request& request, MeasureTiming measure,
- hidl_vec<OutputShape>* outputShapes,
- Timing* timing) {
- ErrorStatus result;
- Return<void> ret = preparedModel->executeSynchronously(
- request, measure,
- [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
- const Timing& time) {
- result = error;
- *outputShapes = shapes;
- *timing = time;
- });
- if (!ret.isOk()) {
- return ErrorStatus::GENERAL_FAILURE;
- }
- return result;
-}
-static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
- const sp<V1_0::IPreparedModel>&) {
- ADD_FAILURE() << "asking for burst execution at V1_0";
- return nullptr;
-}
-static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
- const sp<V1_2::IPreparedModel>& preparedModel) {
- return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
-}
-enum class Executor { ASYNC, SYNC, BURST };
-enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
-const float kDefaultAtol = 1e-5f;
-const float kDefaultRtol = 1e-5f;
-template <typename T_IPreparedModel>
-void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
- Executor executor, MeasureTiming measure, OutputType outputType) {
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples, float fpAtol,
+ float fpRtol) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
@@ -147,14 +65,7 @@
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
- const bool hasFloat16Inputs = !inputs.float16Operands.empty();
- if (hasRelaxedFloat32Model || hasFloat16Inputs) {
- // TODO: Adjust the error limit based on testing.
- // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
- fpAtol = 5.0f * 0.0009765625f;
- // Set the relative tolerance to be 5ULP of the corresponding FP precision.
- fpRtol = 5.0f * 0.0009765625f;
- }
+ CHECK(inputs.float16Operands.empty()) << "float16 is not supported in 1.0";
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
@@ -163,11 +74,13 @@
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = INPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
RequestArgument arg_empty = {
- .hasNoValue = true,
+ .hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -185,31 +98,17 @@
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
- bool sizeLargerThanOne = true;
- for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
- int index, auto, auto s) {
+ for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
- if (index == 0) {
- // On OutputType::INSUFFICIENT, set the output operand with index 0 with
- // buffer size one byte less than needed.
- if (outputType == OutputType::INSUFFICIENT) {
- if (s > 1 && !isZeroSized(golden, index)) {
- s -= 1;
- } else {
- sizeLargerThanOne = false;
- }
- }
- }
RequestArgument arg = {
- .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = OUTPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
- // If output0 does not have size larger than one byte,
- // we can not provide an insufficient buffer
- if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
@@ -248,107 +147,17 @@
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
- ErrorStatus executionStatus;
- hidl_vec<OutputShape> outputShapes;
- Timing timing;
- switch (executor) {
- case Executor::ASYNC: {
- SCOPED_TRACE("asynchronous");
+ // launch execution
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ ASSERT_NE(nullptr, executionCallback.get());
+ Return<ErrorStatus> executionLaunchStatus =
+ preparedModel->execute(request, executionCallback);
+ ASSERT_TRUE(executionLaunchStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
- // launch execution
- sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- ASSERT_NE(nullptr, executionCallback.get());
- Return<ErrorStatus> executionLaunchStatus =
- ExecutePreparedModel(preparedModel, request, measure, executionCallback);
- ASSERT_TRUE(executionLaunchStatus.isOk());
- EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
-
- // retrieve execution status
- executionCallback->wait();
- executionStatus = executionCallback->getStatus();
- outputShapes = executionCallback->getOutputShapes();
- timing = executionCallback->getTiming();
-
- break;
- }
- case Executor::SYNC: {
- SCOPED_TRACE("synchronous");
-
- // execute
- Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
- preparedModel, request, measure, &outputShapes, &timing);
- ASSERT_TRUE(executionReturnStatus.isOk());
- executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
-
- break;
- }
- case Executor::BURST: {
- SCOPED_TRACE("burst");
-
- // create burst
- const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
- CreateBurst(preparedModel);
- ASSERT_NE(nullptr, controller.get());
-
- // create memory keys
- std::vector<intptr_t> keys(request.pools.size());
- for (size_t i = 0; i < keys.size(); ++i) {
- keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
- }
-
- // execute burst
- std::tie(executionStatus, outputShapes, timing) =
- controller->compute(request, measure, keys);
-
- break;
- }
- }
-
- if (outputType != OutputType::FULLY_SPECIFIED &&
- executionStatus == ErrorStatus::GENERAL_FAILURE) {
- LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
- "execute model that it does not support.";
- std::cout << "[ ] Early termination of test because vendor service cannot "
- "execute model that it does not support."
- << std::endl;
- GTEST_SKIP();
- }
- if (measure == MeasureTiming::NO) {
- EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
- EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
- } else {
- if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
- EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
- }
- }
-
- switch (outputType) {
- case OutputType::FULLY_SPECIFIED:
- // If the model output operands are fully specified, outputShapes must be either
- // either empty, or have the same number of elements as the number of outputs.
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
- ASSERT_TRUE(outputShapes.size() == 0 ||
- outputShapes.size() == test.operandDimensions.size());
- break;
- case OutputType::UNSPECIFIED:
- // If the model output operands are not fully specified, outputShapes must have
- // the same number of elements as the number of outputs.
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
- ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
- break;
- case OutputType::INSUFFICIENT:
- ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
- ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
- ASSERT_FALSE(outputShapes[0].isSufficient);
- return;
- }
- // Go through all outputs, overwrite output dimensions with returned output shapes
- if (outputShapes.size() > 0) {
- for_each<uint32_t>(test.operandDimensions,
- [&outputShapes](int idx, std::vector<uint32_t>& dim) {
- dim = outputShapes[idx].dimensions;
- });
- }
+ // retrieve execution status
+ executionCallback->wait();
+ ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// validate results
outputMemory->read();
@@ -360,89 +169,22 @@
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
-
- if (example.expectedMultinomialDistributionTolerance > 0) {
- expectMultinomialDistributionWithinTolerance(test, example);
- }
- }
-}
-template <typename T_IPreparedModel>
-void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
- OutputType outputType) {
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
- kDefaultRtol, executor, measure, outputType);
-}
-
-void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
- if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
- } else {
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
}
}
-static void getPreparedModel(sp<PreparedModelCallback> callback,
- sp<V1_0::IPreparedModel>* preparedModel) {
- *preparedModel = callback->getPreparedModel();
-}
-static void getPreparedModel(sp<PreparedModelCallback> callback,
- sp<V1_2::IPreparedModel>* preparedModel) {
- sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
- *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
-}
-
-void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
- V1_0::Model model = create_model();
+ Model model = create_model();
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations(
- model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
- ASSERT_EQ(ErrorStatus::NONE, status);
- ASSERT_NE(0ul, supported.size());
- fullySupportsModel =
- std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
- });
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
@@ -455,8 +197,7 @@
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<V1_0::IPreparedModel> preparedModel;
- getPreparedModel(preparedModelCallback, &preparedModel);
+ sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -472,115 +213,10 @@
ASSERT_NE(nullptr, preparedModel.get());
float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Executor::ASYNC,
- MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
-}
-
-void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
- V1_1::Model model = create_model();
-
- // see if service can handle model
- bool fullySupportsModel = false;
- Return<void> supportedCall = device->getSupportedOperations_1_1(
- model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
- ASSERT_EQ(ErrorStatus::NONE, status);
- ASSERT_NE(0ul, supported.size());
- fullySupportsModel =
- std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
- });
- ASSERT_TRUE(supportedCall.isOk());
-
- // launch prepare model
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
- // retrieve prepared model
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<V1_0::IPreparedModel> preparedModel;
- getPreparedModel(preparedModelCallback, &preparedModel);
-
- // early termination if vendor service cannot fully prepare model
- if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
- ASSERT_EQ(nullptr, preparedModel.get());
- LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
- "prepare model that it does not support.";
- std::cout << "[ ] Early termination of test because vendor service cannot "
- "prepare model that it does not support."
- << std::endl;
- GTEST_SKIP();
- }
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- ASSERT_NE(nullptr, preparedModel.get());
-
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Executor::ASYNC,
- MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
-}
-
-void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
- sp<V1_2::IPreparedModel>* preparedModel) {
- // see if service can handle model
- bool fullySupportsModel = false;
- Return<void> supportedCall = device->getSupportedOperations_1_2(
- model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
- ASSERT_EQ(ErrorStatus::NONE, status);
- ASSERT_NE(0ul, supported.size());
- fullySupportsModel =
- std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
- });
- ASSERT_TRUE(supportedCall.isOk());
-
- // launch prepare model
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
- hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
- // retrieve prepared model
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- getPreparedModel(preparedModelCallback, preparedModel);
-
- // early termination if vendor service cannot fully prepare model
- if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
- ASSERT_EQ(nullptr, preparedModel->get());
- LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
- "prepare model that it does not support.";
- std::cout << "[ ] Early termination of test because vendor service cannot "
- "prepare model that it does not support."
- << std::endl;
- return;
- }
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- ASSERT_NE(nullptr, preparedModel->get());
-}
-
-// TODO: Reduce code duplication.
-void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
- bool testDynamicOutputShape) {
- V1_2::Model model = create_model();
- sp<V1_2::IPreparedModel> preparedModel = nullptr;
- PrepareModel(device, model, &preparedModel);
- if (preparedModel == nullptr) {
- GTEST_SKIP();
- }
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
}
} // namespace generated_tests
-
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index c7d2399..11950d9 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -14,14 +14,11 @@
* limitations under the License.
*/
-#ifndef VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
-#define VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
-
-#include "TestHarness.h"
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
-#include <android/hardware/neuralnetworks/1.1/IDevice.h>
-#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include "TestHarness.h"
namespace android {
namespace hardware {
@@ -30,28 +27,13 @@
namespace generated_tests {
using ::test_helper::MixedTypedExample;
-void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
- sp<V1_2::IPreparedModel>* preparedModel);
-
-void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
-
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
-void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
-
-void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
- bool testDynamicOutputShape = false);
-
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
-#endif // VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestsV1_0.cpp
similarity index 86%
rename from neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
rename to neuralnetworks/1.0/vts/functional/GeneratedTestsV1_0.cpp
index d1c7de3..74946a5 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestsV1_0.cpp
@@ -16,17 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.0/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -34,8 +33,9 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
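
The renamed GeneratedTestsV1_0.cpp keeps pulling in the bodies emitted by
test_generator.py; each generated test ends up calling generated_tests::Execute
with a model factory, an ignored-output predicate, and the example data. A
hypothetical body is sketched below; the fixture name and the accessor
functions are placeholders, not identifiers introduced by this change:

    // Placeholder names throughout; real bodies are machine-generated.
    TEST_F(NeuralnetworksHidlTest, SomeGeneratedModel) {
        generated_tests::Execute(
                device,
                [] { return createSomeTestModel(); },  // std::function<Model(void)>
                [](int) { return false; },             // is_ignored predicate
                getSomeTestExamples());                // std::vector<MixedTypedExample>
    }
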
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
new file mode 100644
index 0000000..521e524
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+
+#include <cstring>
+#include <map>
+#include <vector>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::test_helper::for_each;
+using ::test_helper::MixedTyped;
+
+template <typename T>
+void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
+ char* src) {
+ for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
+ ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
+ char* begin = src + ra[index].location.offset;
+ memcpy(m.data(), begin, ra[index].location.length);
+ });
+}
+
+void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
+ copy_back_(&dst->float32Operands, ra, src);
+ copy_back_(&dst->int32Operands, ra, src);
+ copy_back_(&dst->quant8AsymmOperands, ra, src);
+ copy_back_(&dst->quant16SymmOperands, ra, src);
+ copy_back_(&dst->float16Operands, ra, src);
+ copy_back_(&dst->bool8Operands, ra, src);
+ copy_back_(&dst->quant8ChannelOperands, ra, src);
+ copy_back_(&dst->quant16AsymmOperands, ra, src);
+ copy_back_(&dst->quant8SymmOperands, ra, src);
+ static_assert(9 == MixedTyped::kNumTypes,
+ "Number of types in MixedTyped changed, but copy_back function wasn't updated");
+}
+
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
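
copy_back moves here unchanged from the old shared harness; the per-version
harnesses call it after an execution to pull results out of the mapped output
pool. A rough usage sketch follows; the variable names mirror the harness but
are illustrative:

    // outputMemory is the mapped output pool; test holds the MixedTyped results.
    char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
    outputMemory->read();                       // begin reading the shared memory
    copy_back(&test, outputs_info, outputPtr);  // copy each typed operand back out
    outputMemory->commit();                     // finish the read transaction
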
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index 5d24fb5..72c4a2b 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -18,7 +18,7 @@
#include "VtsHalNeuralnetworks.h"
-#include "Callbacks.h"
+#include "1.0/Callbacks.h"
namespace android {
namespace hardware {
@@ -27,8 +27,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index f0c93b7..058eb25 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -16,16 +16,15 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.0/Callbacks.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -33,7 +32,7 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -121,11 +120,13 @@
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = INPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
RequestArgument arg_empty = {
- .hasNoValue = true,
+ .hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -143,8 +144,10 @@
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = OUTPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index aee2f85..95b7ad3 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -20,7 +20,7 @@
#include <android-base/logging.h>
-#include "Callbacks.h"
+#include "1.0/Callbacks.h"
namespace android {
namespace hardware {
@@ -29,7 +29,7 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
sp<IPreparedModel>* preparedModel) {
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index 22285be..c32a91d 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
-#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
@@ -89,4 +89,4 @@
} // namespace android::hardware::neuralnetworks::V1_0
-#endif // VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/include/1.0/Callbacks.h
similarity index 64%
copy from neuralnetworks/1.0/vts/functional/Callbacks.h
copy to neuralnetworks/1.0/vts/functional/include/1.0/Callbacks.h
index 4707d0a..36318ea 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Callbacks.h
@@ -19,9 +19,6 @@
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
-#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
-#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
-#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
@@ -32,11 +29,9 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_2 {
+namespace V1_0 {
namespace implementation {
-using V1_0::ErrorStatus;
-
/**
* The CallbackBase class is used internally by the NeuralNetworks runtime to
* synchronize between different threads. An asynchronous task is launched
@@ -60,7 +55,7 @@
* std::condition_variable, or std::experimental::latch instead.
*/
class CallbackBase {
- public:
+ public:
CallbackBase();
~CallbackBase();
@@ -79,8 +74,8 @@
* before the time duration expired, std::cv_status::timeout
* otherwise.
*/
- template<class Rep, class Period>
- std::cv_status wait_for(const std::chrono::duration<Rep,Period>& timeout_duration);
+ template <class Rep, class Period>
+ std::cv_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration);
/**
* CallbackBase::on_finish binds a function to the callback object. This
@@ -144,7 +139,7 @@
*/
void join_thread();
- protected:
+ protected:
/**
* CallbackBase::notify enables all prior and future wait* calls on the
* callback object to proceed. The call to CallbackBase::notify happens
@@ -158,16 +153,16 @@
*/
void notify();
- private:
+ private:
// Same as CallbackBase::join_thread but assumes we already hold a lock on
// mMutex.
void join_thread_locked();
- bool mNotified;
- std::mutex mMutex;
- std::condition_variable mCondition;
+ bool mNotified;
+ std::mutex mMutex;
+ std::condition_variable mCondition;
std::function<bool(void)> mPostWork;
- std::thread mThread;
+ std::thread mThread;
};
/**
@@ -176,29 +171,29 @@
* asynchronously with respect to the runtime. If a calling thread calls wait*
* or get* on a PreparedModelCallback object and the corresponding asynchronous
* task has not finished preparing the model, the calling thread will block
- * until the asynchronous task has either called notify or notify_1_2. For more
- * information on the synchronization behavior, refer to the CallbackBase class.
+ * until the asynchronous task has called notify. For more information on the
+ * synchronization behavior, refer to the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
+ * CallbackBase, and implements the HIDL notify call from
* IPreparedModelCallback. This callback object is passed as an argument to
* IDevice::prepareModel.
*/
class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback {
- public:
+ public:
PreparedModelCallback();
~PreparedModelCallback() override;
/**
- * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
- * mark the callback object with the return status of the asynchronous
- * model preparation along with the prepared model, and call
- * CallbackBase::notify, enabling all prior and future wait* calls on the
- * PreparedModelCallback object to proceed. For more information on the
- * synchronization behavior, refer to the CallbackBase class.
+ * IPreparedModelCallback::notify marks the callback object with the return
+ * status of the asynchronous model preparation along with the prepared
+ * model and calls CallbackBase::notify, enabling all prior and future
+ * wait* calls on the PreparedModelCallback object to proceed.
+ * For more information on the synchronization behavior, refer to the
+ * CallbackBase class.
*
- * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
- * must be called exactly once on a given PreparedModelCallback object.
+ * IPreparedModelCallback::notify must be called exactly once on a given
+ * PreparedModelCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
@@ -210,8 +205,6 @@
* nullptr if the model was unable to be prepared.
*/
Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
- Return<void> notify_1_2(ErrorStatus status,
- const sp<V1_2::IPreparedModel>& preparedModel) override;
/**
* Retrieves the error status returned from the asynchronous task launched
@@ -241,8 +234,8 @@
*/
sp<V1_0::IPreparedModel> getPreparedModel();
- private:
- ErrorStatus mErrorStatus;
+ private:
+ ErrorStatus mErrorStatus;
sp<V1_0::IPreparedModel> mPreparedModel;
};
@@ -251,29 +244,28 @@
* execution from a task executing asynchronously with respect to the runtime.
* If a calling thread calls wait* or get* on a PreparedModelCallback object and
* the corresponding asynchronous task has not finished the execution, the
- * calling thread will block until the asynchronous task has either called notify
- * or notify_1_2. For more information on the synchronization behavior, refer to
- * the CallbackBase class.
+ * calling thread will block until the asynchronous task has called notify.
+ * For more information on the synchronization behavior, refer to the
+ * CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
- * IExecutionCallback. This callback object is passed as an argument to
- * IPreparedModel::execute.
+ * CallbackBase, and implements the HIDL notify call from IExecutionCallback.
+ * This callback object is passed as an argument to IPreparedModel::execute.
*/
-class ExecutionCallback : public CallbackBase, public IExecutionCallback {
- public:
+class ExecutionCallback : public CallbackBase, public IExecutionCallback {
+ public:
ExecutionCallback();
~ExecutionCallback() override;
/**
- * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
- * callback object with the return status of the asynchronous execution that
- * held this callback and enable all prior and future wait* calls on the
- * ExecutionCallback object to proceed. For more information on the
- * synchronization behavior, refer to the CallbackBase class.
+ * IExecutionCallback::notify marks the callback object with the return
+ * status of the asynchronous execution that held this callback and enables
+ * all prior and future wait* calls on the ExecutionCallback object to
+ * proceed. For more information on the synchronization behavior, refer to
+ * the CallbackBase class.
*
- * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
- * be called exactly once on a given ExecutionCallback object.
+ * IExecutionCallback::notify must be called exactly once on a given
+ * ExecutionCallback object.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
@@ -288,47 +280,10 @@
Return<void> notify(ErrorStatus status) override;
/**
- * Similar to IExecutionCallback::notify, but for V1_2::IPreparedModel to
- * also notify output shapes along with error status.
- *
- * @param status Error status returned from launching the asynchronous task
- * (if the launch fails) or from the asynchronous task itself
- * (if the launch succeeds). Must be:
- * - NONE if the asynchronous execution was successful
- * - DEVICE_UNAVAILABLE if driver is offline or busy
- * - GENERAL_FAILURE if the asynchronous task resulted in an
- * unspecified error
- * - OUTPUT_INSUFFICIENT_SIZE if at least one output
- * operand buffer is not large enough to store the
- * corresponding output
- * - INVALID_ARGUMENT if one of the input arguments to
- * prepareModel is invalid
- * @param outputShapes A list of shape information of model output operands.
- * The index into "outputShapes" corresponds to the index
- * of the output operand in the Request outputs vector.
- * outputShapes must be empty unless the status is either
- * NONE or OUTPUT_INSUFFICIENT_SIZE.
- * @return Timing Duration of execution. Unless MeasureTiming::YES was passed when
- * launching the execution and status is NONE, all times must
- * be reported as UINT64_MAX. A driver may choose to report
- * any time as UINT64_MAX, indicating that particular measurement is
- * not available.
- */
- Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) override;
-
- // An overload of the latest notify interface to hide the version from ExecutionBuilder.
- Return<void> notify(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- return notify_1_2(status, outputShapes, timing);
- }
-
- /**
* Retrieves the error status returned from the asynchronous task launched
- * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If
- * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished
- * asynchronously executing, this call will block until the asynchronous task
- * notifies the object.
+ * by IPreparedModel::execute. If IPreparedModel::execute has not finished
+ * asynchronously executing, this call will block until the asynchronous
+ * task notifies the object.
*
* @return status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
@@ -345,50 +300,17 @@
*/
ErrorStatus getStatus();
- /**
- * Retrieves the output shapes returned from the asynchronous task launched
- * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
- * asynchronously executing, this call will block until the asynchronous task
- * notifies the object.
- *
- * If the asynchronous task was launched by IPreparedModel::execute, an empty vector
- * will be returned.
- *
- * @return outputShapes A list of shape information of model output operands.
- * The index into "outputShapes" corresponds to the index
- * of the output operand in the Request outputs vector.
- * outputShapes must be empty unless the status is either
- * NONE or OUTPUT_INSUFFICIENT_SIZE.
- */
- const std::vector<OutputShape>& getOutputShapes();
-
- /**
- * Retrieves the duration of execution ofthe asynchronous task launched
- * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
- * asynchronously executing, this call will block until the asynchronous task
- * notifies the object.
- *
- * If the asynchronous task was launched by IPreparedModel::execute, every time
- * must be UINT64_MAX.
- *
- * @return timing Duration of the execution. Every time must be UINT64_MAX unless
- * the status is NONE.
- */
- Timing getTiming();
-
- private:
+ private:
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
- std::vector<OutputShape> mOutputShapes = {};
- Timing mTiming = {};
};
-
// template function implementation(s) below this point
-template<class Rep, class Period>
-std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep,Period>& timeout_duration) {
+template <class Rep, class Period>
+std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep, Period>& timeout_duration) {
std::unique_lock<std::mutex> lock(mMutex);
- std::cv_status status = mCondition.wait_for(lock, timeout_duration, [this]{return mNotified;});
+ std::cv_status status =
+ mCondition.wait_for(lock, timeout_duration, [this] { return mNotified; });
if (status != std::cv_status::timeout) {
join_thread_locked();
}
@@ -396,7 +318,7 @@
}
} // namespace implementation
-} // namespace V1_2
+} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
new file mode 100644
index 0000000..b270c20
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <algorithm>
+#include <vector>
+#include "TestHarness.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+void copy_back(::test_helper::MixedTyped* dst, const std::vector<V1_0::RequestArgument>& ra,
+ char* src);
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
+template <typename Type>
+inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+ if (vec) {
+ std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+ vec->resize(vec->size() - 1);
+ }
+}
+
+template <typename Type>
+inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+ // assume vec is valid
+ const uint32_t index = vec->size();
+ vec->resize(index + 1);
+ (*vec)[index] = value;
+ return index;
+}
+
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
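
The two hidl_vec helpers above back the model/request mutation patterns used
by the validation tests. A minimal usage sketch with made-up values:

    hidl_vec<uint32_t> dims = {1, 2, 3};
    hidl_vec_removeAt(&dims, 1);                          // dims is now {1, 3}
    const uint32_t idx = hidl_vec_push_back(&dims, 7u);   // dims is {1, 3, 7}, idx == 2
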
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index 4fbeac9..ee90ec6 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -14,10 +14,41 @@
// limitations under the License.
//
+cc_defaults {
+ name: "VtsHalNeuralNetworksV1_1TargetTestDefaults",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "ValidateModel.cpp",
+ "ValidateRequest.cpp",
+ "VtsHalNeuralnetworks.cpp",
+ "GeneratedTestHarness.cpp",
+ ],
+ shared_libs: [
+ "libfmq",
+ "libnativewindow",
+ ],
+ static_libs: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libgmock",
+ "libhidlmemory",
+ "libneuralnetworks_utils",
+ "VtsHalNeuralNetworksV1_0_utils",
+ ],
+ header_libs: [
+ "libneuralnetworks_headers",
+ "libneuralnetworks_generated_test_harness_headers",
+ "libneuralnetworks_generated_tests",
+ ],
+ test_suites: ["general-tests"],
+}
+
// Tests for V1_0 models using the V1_1 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_1CompatV1_0TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
srcs: [
"GeneratedTestsV1_0.cpp",
],
@@ -26,19 +57,19 @@
// Tests for V1_1 models.
cc_test {
name: "VtsHalNeuralnetworksV1_1TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
- "GeneratedTests.cpp",
+ "GeneratedTestsV1_1.cpp",
],
}
cc_test {
name: "PresubmitHalNeuralnetworksV1_1TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
- "GeneratedTests.cpp",
+ "GeneratedTestsV1_1.cpp",
],
cflags: [
"-DPRESUBMIT_NOT_VTS",
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000..d9f64fd
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GeneratedTestHarness.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+#include <iostream>
+
+#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace generated_tests {
+
+using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
+using ::android::hardware::neuralnetworks::V1_1::IDevice;
+using ::android::hardware::neuralnetworks::V1_1::Model;
+using ::android::hidl::memory::V1_0::IMemory;
+using ::test_helper::compare;
+using ::test_helper::filter;
+using ::test_helper::for_all;
+using ::test_helper::MixedTyped;
+using ::test_helper::MixedTypedExample;
+using ::test_helper::resize_accordingly;
+
+// Top level driver for models and examples generated by test_generator.py
+// Test driver for those generated from ml/nn/runtime/test/spec
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, float fpAtol, float fpRtol) {
+ const uint32_t INPUT = 0;
+ const uint32_t OUTPUT = 1;
+
+ int example_no = 1;
+ for (auto& example : examples) {
+ SCOPED_TRACE(example_no++);
+ const MixedTyped& inputs = example.operands.first;
+ const MixedTyped& golden = example.operands.second;
+
+ const bool hasFloat16Inputs = !inputs.float16Operands.empty();
+ if (hasRelaxedFloat32Model || hasFloat16Inputs) {
+ // TODO: Adjust the error limit based on testing.
+ // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
+ fpAtol = 5.0f * 0.0009765625f;
+ // Set the relative tolerance to be 5ULP of the corresponding FP precision.
+ fpRtol = 5.0f * 0.0009765625f;
+ }
+
+ std::vector<RequestArgument> inputs_info, outputs_info;
+ uint32_t inputSize = 0, outputSize = 0;
+ // This function only partially specifies the metadata (vector of RequestArguments).
+ // The contents are copied over below.
+ for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+ if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+ RequestArgument arg = {
+ .location = {.poolIndex = INPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
+ };
+ RequestArgument arg_empty = {
+ .hasNoValue = true,
+ };
+ inputs_info[index] = s ? arg : arg_empty;
+ inputSize += s;
+ });
+ // Compute offset for inputs 1 and so on
+ {
+ size_t offset = 0;
+ for (auto& i : inputs_info) {
+ if (!i.hasNoValue) i.location.offset = offset;
+ offset += i.location.length;
+ }
+ }
+
+ MixedTyped test; // holding test results
+
+ // Go through all outputs, initialize RequestArgument descriptors
+ resize_accordingly(golden, test);
+ for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
+ if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+ RequestArgument arg = {
+ .location = {.poolIndex = OUTPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
+ };
+ outputs_info[index] = arg;
+ outputSize += s;
+ });
+ // Compute offset for outputs 1 and so on
+ {
+ size_t offset = 0;
+ for (auto& i : outputs_info) {
+ i.location.offset = offset;
+ offset += i.location.length;
+ }
+ }
+ std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+ nn::allocateSharedMemory(outputSize)};
+ ASSERT_NE(0ull, pools[INPUT].size());
+ ASSERT_NE(0ull, pools[OUTPUT].size());
+
+ // load data
+ sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+ sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+ ASSERT_NE(nullptr, inputMemory.get());
+ ASSERT_NE(nullptr, outputMemory.get());
+ char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+ char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
+ ASSERT_NE(nullptr, inputPtr);
+ ASSERT_NE(nullptr, outputPtr);
+ inputMemory->update();
+ outputMemory->update();
+
+ // Go through all inputs, copy the values
+ for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+ char* begin = (char*)p;
+ char* end = begin + s;
+ // TODO: handle more than one input
+ std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+ });
+
+ inputMemory->commit();
+ outputMemory->commit();
+
+ const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
+
+ // launch execution
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ ASSERT_NE(nullptr, executionCallback.get());
+ Return<ErrorStatus> executionLaunchStatus =
+ preparedModel->execute(request, executionCallback);
+ ASSERT_TRUE(executionLaunchStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+ // retrieve execution status
+ executionCallback->wait();
+ ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
+
+ // validate results
+ outputMemory->read();
+ copy_back(&test, outputs_info, outputPtr);
+ outputMemory->commit();
+ // Filter out don't cares
+ MixedTyped filtered_golden = filter(golden, is_ignored);
+ MixedTyped filtered_test = filter(test, is_ignored);
+
+ // We want "close-enough" results for float
+ compare(filtered_golden, filtered_test, fpAtol, fpRtol);
+ }
+}
+
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
+ Model model = create_model();
+
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedCall = device->getSupportedOperations_1_1(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedCall.isOk());
+
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ // retrieve prepared model
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+
+ // early termination if vendor service cannot fully prepare model
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel.get());
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel.get());
+
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f);
+}
+
+} // namespace generated_tests
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
new file mode 100644
index 0000000..ab71b2b
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
+
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <functional>
+#include <vector>
+#include "TestHarness.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace generated_tests {
+
+void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
+ std::function<bool(int)> is_ignored,
+ const std::vector<::test_helper::MixedTypedExample>& examples);
+
+} // namespace generated_tests
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
index e67ef8e..10cf5a9 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
@@ -16,17 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.0/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -34,8 +33,10 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_1.cpp
similarity index 83%
rename from neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
rename to neuralnetworks/1.1/vts/functional/GeneratedTestsV1_1.cpp
index 4db1276..079c18c 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_1.cpp
@@ -16,17 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.0/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -34,8 +33,10 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index b35a901..fb80d13 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -16,25 +16,22 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
#include "VtsHalNeuralnetworks.h"
-#include "Callbacks.h"
-
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
-
-using V1_0::IPreparedModel;
-using V1_0::Operand;
-using V1_0::OperandLifeTime;
-using V1_0::OperandType;
-
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_0::Operand;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
+using ::android::hardware::neuralnetworks::V1_0::OperandType;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -42,10 +39,10 @@
const V1_1::Model& model) {
SCOPED_TRACE(message + " [getSupportedOperations_1_1]");
- Return<void> ret =
- device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
- EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
- });
+ Return<void> ret = device->getSupportedOperations_1_1(
+ model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ });
EXPECT_TRUE(ret.isOk());
}
@@ -56,7 +53,7 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_1(model, preference, preparedModelCallback);
+ device->prepareModel_1_1(model, preference, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -87,36 +84,16 @@
validatePrepareModel(device, message, model, preference);
}
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
- if (vec) {
- std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
- vec->resize(vec->size() - 1);
- }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
- // assume vec is valid
- const uint32_t index = vec->size();
- vec->resize(index + 1);
- (*vec)[index] = value;
- return index;
-}
-
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
{
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
});
}
@@ -130,10 +107,10 @@
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const int32_t invalidOperandTypes[] = {
- static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
- static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
- static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
- static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
+ static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
+ static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
+ static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
+ static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
};
static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
@@ -226,7 +203,7 @@
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const std::vector<int32_t> invalidZeroPoints =
- getInvalidZeroPoints(model.operands[operand].type);
+ getInvalidZeroPoints(model.operands[operand].type);
for (int32_t invalidZeroPoint : invalidZeroPoints) {
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
@@ -258,18 +235,18 @@
break;
case OperandType::TENSOR_FLOAT32:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_INT32:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
break;
case OperandType::OEM:
@@ -319,10 +296,10 @@
///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
static const int32_t invalidOperationTypes[] = {
- static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
- static_cast<int32_t>(OperationType::TRANSPOSE) + 1, // upper bound fundamental
- static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
- static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
+ static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
+ static_cast<int32_t>(OperationType::TRANSPOSE) + 1, // upper bound fundamental
+ static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
+ static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
};
static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
@@ -333,7 +310,7 @@
std::to_string(invalidOperationType);
validate(device, message, model, [operation, invalidOperationType](Model* model) {
model->operations[operation].type =
- static_cast<OperationType>(invalidOperationType);
+ static_cast<OperationType>(invalidOperationType);
});
}
}
@@ -486,7 +463,7 @@
static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
- "addOperationOutputTest: operation " + std::to_string(operation);
+ "addOperationOutputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -498,14 +475,14 @@
///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
static const int32_t invalidExecutionPreferences[] = {
- static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
- static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
+ static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
+ static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1::Model& model) {
for (int32_t preference : invalidExecutionPreferences) {
const std::string message =
- "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+ "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
validate(device, message, model, [](Model*) {},
static_cast<ExecutionPreference>(preference));
}
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index f4adbab..c549728 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -16,16 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -33,11 +33,15 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_1::IPreparedModel;
using ::android::hidl::memory::V1_0::IMemory;
-using test_helper::for_all;
-using test_helper::MixedTyped;
-using test_helper::MixedTypedExample;
+using ::test_helper::for_all;
+using ::test_helper::MixedTyped;
+using ::test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -61,26 +65,6 @@
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
- if (vec) {
- std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
- vec->resize(vec->size() - 1);
- }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
- // assume vec is valid
- const uint32_t index = vec->size();
- vec->resize(index + 1);
- (*vec)[index] = value;
- return index;
-}
-
///////////////////////// REMOVE INPUT ////////////////////////////////////
static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
@@ -121,11 +105,13 @@
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = INPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
RequestArgument arg_empty = {
- .hasNoValue = true,
+ .hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -143,8 +129,10 @@
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = OUTPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 08069f2..12bdd3f 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -20,7 +20,7 @@
#include <android-base/logging.h>
-#include "Callbacks.h"
+#include "1.0/Callbacks.h"
namespace android {
namespace hardware {
@@ -29,7 +29,7 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
sp<IPreparedModel>* preparedModel) {
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index f3f587b..3156784 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
-#define VTS_HAL_NEURALNETWORKS_V1_1_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
@@ -98,4 +98,4 @@
} // namespace android::hardware::neuralnetworks::V1_0
-#endif // VTS_HAL_NEURALNETWORKS_V1_1_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 6c26820..b48646f 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -14,10 +14,44 @@
// limitations under the License.
//
+cc_defaults {
+ name: "VtsHalNeuralNetworksV1_2TargetTestDefaults",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "ValidateModel.cpp",
+ "ValidateRequest.cpp",
+ "VtsHalNeuralnetworks.cpp",
+ "Callbacks.cpp",
+ "GeneratedTestHarness.cpp",
+ ],
+ local_include_dirs: ["include"],
+ shared_libs: [
+ "libfmq",
+ "libnativewindow",
+ ],
+ static_libs: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
+ "android.hardware.neuralnetworks@1.2",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libgmock",
+ "libhidlmemory",
+ "libneuralnetworks_utils",
+ "VtsHalNeuralNetworksV1_0_utils",
+ ],
+ header_libs: [
+ "libneuralnetworks_headers",
+ "libneuralnetworks_generated_test_harness_headers",
+ "libneuralnetworks_generated_tests",
+ ],
+ test_suites: ["general-tests"],
+}
+
// Tests for V1_0 models using the V1_2 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_2CompatV1_0TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"GeneratedTestsV1_0.cpp",
"ValidateBurst.cpp",
@@ -30,7 +64,7 @@
// Tests for V1_1 models using the V1_2 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_2CompatV1_1TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"GeneratedTestsV1_1.cpp",
"ValidateBurst.cpp",
@@ -43,11 +77,11 @@
// Tests for V1_2 models.
cc_test {
name: "VtsHalNeuralnetworksV1_2TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"CompilationCachingTests.cpp",
- "GeneratedTests.cpp",
+ "GeneratedTestsV1_2.cpp",
"ValidateBurst.cpp",
],
cflags: [
@@ -57,10 +91,10 @@
cc_test {
name: "PresubmitHalNeuralnetworksV1_2TargetTest",
- defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
- "GeneratedTests.cpp",
+ "GeneratedTestsV1_2.cpp",
"ValidateBurst.cpp",
],
cflags: [
diff --git a/neuralnetworks/1.2/vts/functional/Callbacks.cpp b/neuralnetworks/1.2/vts/functional/Callbacks.cpp
new file mode 100644
index 0000000..cfaf91d
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/Callbacks.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "1.2/Callbacks.h"
+#include <android-base/logging.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace implementation {
+
+CallbackBase::CallbackBase() : mNotified(false) {}
+
+CallbackBase::~CallbackBase() {
+ // Note that we cannot call CallbackBase::join_thread from here:
+ // CallbackBase is intended to be reference counted, and it is possible that
+ // the reference count drops to zero in the bound thread, causing the
+ // bound thread to call this destructor. If a thread tries to join
+ // itself, it throws an exception, producing a message like the
+ // following:
+ //
+ // terminating with uncaught exception of type std::__1::system_error:
+ // thread::join failed: Resource deadlock would occur
+}
+
+void CallbackBase::wait() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mCondition.wait(lock, [this] { return mNotified; });
+ join_thread_locked();
+}
+
+bool CallbackBase::on_finish(std::function<bool(void)> post_work) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (mPostWork != nullptr) {
+ LOG(ERROR) << "CallbackBase::on_finish -- a post-work function has already been bound to "
+ "this callback object";
+ return false;
+ }
+ if (post_work == nullptr) {
+ LOG(ERROR) << "CallbackBase::on_finish -- the new post-work function is invalid";
+ return false;
+ }
+ mPostWork = std::move(post_work);
+ return true;
+}
+
+bool CallbackBase::bind_thread(std::thread&& asyncThread) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (mThread.joinable()) {
+ LOG(ERROR) << "CallbackBase::bind_thread -- a thread has already been bound to this "
+ "callback object";
+ return false;
+ }
+ if (!asyncThread.joinable()) {
+ LOG(ERROR) << "CallbackBase::bind_thread -- the new thread is not joinable";
+ return false;
+ }
+ mThread = std::move(asyncThread);
+ return true;
+}
+
+void CallbackBase::join_thread() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ join_thread_locked();
+}
+
+void CallbackBase::notify() {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mNotified = true;
+ if (mPostWork != nullptr) {
+ bool success = mPostWork();
+ if (!success) {
+ LOG(ERROR) << "CallbackBase::notify -- post work failed";
+ }
+ }
+ }
+ mCondition.notify_all();
+}
+
+void CallbackBase::join_thread_locked() {
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+PreparedModelCallback::PreparedModelCallback()
+ : mErrorStatus(ErrorStatus::GENERAL_FAILURE), mPreparedModel(nullptr) {}
+
+PreparedModelCallback::~PreparedModelCallback() {}
+
+Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
+ const sp<V1_0::IPreparedModel>& preparedModel) {
+ mErrorStatus = errorStatus;
+ mPreparedModel = preparedModel;
+ CallbackBase::notify();
+ return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+ const sp<V1_2::IPreparedModel>& preparedModel) {
+ mErrorStatus = errorStatus;
+ mPreparedModel = preparedModel;
+ CallbackBase::notify();
+ return Void();
+}
+
+ErrorStatus PreparedModelCallback::getStatus() {
+ wait();
+ return mErrorStatus;
+}
+
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
+ wait();
+ return mPreparedModel;
+}
+
+ExecutionCallback::ExecutionCallback() : mErrorStatus(ErrorStatus::GENERAL_FAILURE) {}
+
+ExecutionCallback::~ExecutionCallback() {}
+
+Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
+ mErrorStatus = errorStatus;
+ mOutputShapes = {};
+ mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+ CallbackBase::notify();
+ return Void();
+}
+
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
+ const hidl_vec<OutputShape>& outputShapes,
+ const Timing& timing) {
+ mErrorStatus = errorStatus;
+ mOutputShapes = outputShapes;
+ mTiming = timing;
+ CallbackBase::notify();
+ return Void();
+}
+
+ErrorStatus ExecutionCallback::getStatus() {
+ wait();
+ return mErrorStatus;
+}
+
+const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
+ wait();
+ return mOutputShapes;
+}
+
+Timing ExecutionCallback::getTiming() {
+ wait();
+ return mTiming;
+}
+
+} // namespace implementation
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 4411b90..9cabb7b 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -27,8 +27,9 @@
#include <cstdlib>
#include <random>
-#include "Callbacks.h"
+#include "1.2/Callbacks.h"
#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000..c3578cd
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GeneratedTestHarness.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+#include <iostream>
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace generated_tests {
+
+using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
+using ::android::hardware::neuralnetworks::V1_2::IDevice;
+using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_2::Model;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
+using ::test_helper::compare;
+using ::test_helper::expectMultinomialDistributionWithinTolerance;
+using ::test_helper::filter;
+using ::test_helper::for_all;
+using ::test_helper::for_each;
+using ::test_helper::MixedTyped;
+using ::test_helper::MixedTypedExample;
+using ::test_helper::resize_accordingly;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+static bool isZeroSized(const MixedTyped& example, uint32_t index) {
+ for (auto i : example.operandDimensions.at(index)) {
+ if (i == 0) return true;
+ }
+ return false;
+}
+
+static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+ const Request& request, MeasureTiming measure,
+ sp<ExecutionCallback>& callback) {
+ return preparedModel->execute_1_2(request, measure, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+ const Request& request, MeasureTiming measure,
+ hidl_vec<OutputShape>* outputShapes,
+ Timing* timing) {
+ ErrorStatus result;
+ Return<void> ret = preparedModel->executeSynchronously(
+ request, measure,
+ [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+ const Timing& time) {
+ result = error;
+ *outputShapes = shapes;
+ *timing = time;
+ });
+ if (!ret.isOk()) {
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return result;
+}
+static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+ const sp<IPreparedModel>& preparedModel) {
+ return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+}
+enum class Executor { ASYNC, SYNC, BURST };
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+const float kDefaultAtol = 1e-5f;
+const float kDefaultRtol = 1e-5f;
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
+ Executor executor, MeasureTiming measure, OutputType outputType) {
+ const uint32_t INPUT = 0;
+ const uint32_t OUTPUT = 1;
+
+ int example_no = 1;
+ for (auto& example : examples) {
+ SCOPED_TRACE(example_no++);
+ const MixedTyped& inputs = example.operands.first;
+ const MixedTyped& golden = example.operands.second;
+
+ const bool hasFloat16Inputs = !inputs.float16Operands.empty();
+ if (hasRelaxedFloat32Model || hasFloat16Inputs) {
+ // TODO: Adjust the error limit based on testing.
+ // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
+ fpAtol = 5.0f * 0.0009765625f;
+ // Set the relative tolerance to be 5ULP of the corresponding FP precision.
+ fpRtol = 5.0f * 0.0009765625f;
+ }
+
+ std::vector<RequestArgument> inputs_info, outputs_info;
+ uint32_t inputSize = 0, outputSize = 0;
+ // This function only partially specifies the metadata (vector of RequestArguments).
+ // The contents are copied over below.
+ for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+ if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+ RequestArgument arg = {
+ .location = {.poolIndex = INPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
+ };
+ RequestArgument arg_empty = {
+ .hasNoValue = true,
+ };
+ inputs_info[index] = s ? arg : arg_empty;
+ inputSize += s;
+ });
+ // Compute offset for inputs 1 and so on
+ {
+ size_t offset = 0;
+ for (auto& i : inputs_info) {
+ if (!i.hasNoValue) i.location.offset = offset;
+ offset += i.location.length;
+ }
+ }
+
+ MixedTyped test; // holding test results
+
+ // Go through all outputs, initialize RequestArgument descriptors
+ resize_accordingly(golden, test);
+ bool sizeLargerThanOne = true;
+ for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
+ int index, auto, auto s) {
+ if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+ if (index == 0) {
+ // On OutputType::INSUFFICIENT, make the buffer for the output operand at
+ // index 0 one byte smaller than needed.
+ if (outputType == OutputType::INSUFFICIENT) {
+ if (s > 1 && !isZeroSized(golden, index)) {
+ s -= 1;
+ } else {
+ sizeLargerThanOne = false;
+ }
+ }
+ }
+ RequestArgument arg = {
+ .location = {.poolIndex = OUTPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
+ };
+ outputs_info[index] = arg;
+ outputSize += s;
+ });
+ // If output0 does not have size larger than one byte,
+ // we cannot provide an insufficient buffer.
+ if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
+ // Compute offset for outputs 1 and so on
+ {
+ size_t offset = 0;
+ for (auto& i : outputs_info) {
+ i.location.offset = offset;
+ offset += i.location.length;
+ }
+ }
+ std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+ nn::allocateSharedMemory(outputSize)};
+ ASSERT_NE(0ull, pools[INPUT].size());
+ ASSERT_NE(0ull, pools[OUTPUT].size());
+
+ // load data
+ sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+ sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+ ASSERT_NE(nullptr, inputMemory.get());
+ ASSERT_NE(nullptr, outputMemory.get());
+ char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+ char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
+ ASSERT_NE(nullptr, inputPtr);
+ ASSERT_NE(nullptr, outputPtr);
+ inputMemory->update();
+ outputMemory->update();
+
+ // Go through all inputs, copy the values
+ for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+ char* begin = (char*)p;
+ char* end = begin + s;
+ // TODO: handle more than one input
+ std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+ });
+
+ inputMemory->commit();
+ outputMemory->commit();
+
+ const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
+
+ ErrorStatus executionStatus;
+ hidl_vec<OutputShape> outputShapes;
+ Timing timing;
+ switch (executor) {
+ case Executor::ASYNC: {
+ SCOPED_TRACE("asynchronous");
+
+ // launch execution
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ ASSERT_NE(nullptr, executionCallback.get());
+ Return<ErrorStatus> executionLaunchStatus =
+ ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ ASSERT_TRUE(executionLaunchStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+ // retrieve execution status
+ executionCallback->wait();
+ executionStatus = executionCallback->getStatus();
+ outputShapes = executionCallback->getOutputShapes();
+ timing = executionCallback->getTiming();
+
+ break;
+ }
+ case Executor::SYNC: {
+ SCOPED_TRACE("synchronous");
+
+ // execute
+ Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+ preparedModel, request, measure, &outputShapes, &timing);
+ ASSERT_TRUE(executionReturnStatus.isOk());
+ executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+ break;
+ }
+ case Executor::BURST: {
+ SCOPED_TRACE("burst");
+
+ // create burst
+ const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
+ CreateBurst(preparedModel);
+ ASSERT_NE(nullptr, controller.get());
+
+ // create memory keys
+ std::vector<intptr_t> keys(request.pools.size());
+ for (size_t i = 0; i < keys.size(); ++i) {
+ keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+ }
+
+ // execute burst
+ std::tie(executionStatus, outputShapes, timing) =
+ controller->compute(request, measure, keys);
+
+ break;
+ }
+ }
+
+ if (outputType != OutputType::FULLY_SPECIFIED &&
+ executionStatus == ErrorStatus::GENERAL_FAILURE) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "execute model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ if (measure == MeasureTiming::NO) {
+ EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+ EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+ } else {
+ if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+ EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+ }
+ }
+
+ switch (outputType) {
+ case OutputType::FULLY_SPECIFIED:
+ // If the model output operands are fully specified, outputShapes must be
+ // either empty, or have the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_TRUE(outputShapes.size() == 0 ||
+ outputShapes.size() == test.operandDimensions.size());
+ break;
+ case OutputType::UNSPECIFIED:
+ // If the model output operands are not fully specified, outputShapes must have
+ // the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
+ break;
+ case OutputType::INSUFFICIENT:
+ ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
+ ASSERT_FALSE(outputShapes[0].isSufficient);
+ return;
+ }
+ // Go through all outputs, overwrite output dimensions with returned output shapes
+ if (outputShapes.size() > 0) {
+ for_each<uint32_t>(test.operandDimensions,
+ [&outputShapes](int idx, std::vector<uint32_t>& dim) {
+ dim = outputShapes[idx].dimensions;
+ });
+ }
+
+ // validate results
+ outputMemory->read();
+ copy_back(&test, outputs_info, outputPtr);
+ outputMemory->commit();
+ // Filter out don't cares
+ MixedTyped filtered_golden = filter(golden, is_ignored);
+ MixedTyped filtered_test = filter(test, is_ignored);
+
+ // We want "close-enough" results for float
+ compare(filtered_golden, filtered_test, fpAtol, fpRtol);
+
+ if (example.expectedMultinomialDistributionTolerance > 0) {
+ expectMultinomialDistributionWithinTolerance(test, example);
+ }
+ }
+}
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
+ OutputType outputType) {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
+ kDefaultRtol, executor, measure, outputType);
+}
+
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
+ if (testDynamicOutputShape) {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ } else {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ }
+}
+
+void PrepareModel(const sp<IDevice>& device, const Model& model,
+ sp<IPreparedModel>* preparedModel) {
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedCall = device->getSupportedOperations_1_2(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedCall.isOk());
+
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ // retrieve prepared model
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
+ *preparedModel = IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+
+ // early termination if vendor service cannot fully prepare model
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel->get());
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ return;
+ }
+ EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel->get());
+}
+
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+ bool testDynamicOutputShape) {
+ Model model = create_model();
+ sp<IPreparedModel> preparedModel = nullptr;
+ PrepareModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) {
+ GTEST_SKIP();
+ }
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
+}
+
+} // namespace generated_tests
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
new file mode 100644
index 0000000..30e5578
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <functional>
+#include <vector>
+#include "TestHarness.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace generated_tests {
+
+using ::test_helper::MixedTypedExample;
+
+void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
+ sp<V1_2::IPreparedModel>* preparedModel);
+
+void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+ std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
+
+void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+ bool testDynamicOutputShape = false);
+
+} // namespace generated_tests
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
index 990cab9..d48c73e 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -16,17 +16,17 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
index fa6d54d..1adb371 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -16,17 +16,17 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_2.cpp
similarity index 97%
rename from neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
rename to neuralnetworks/1.2/vts/functional/GeneratedTestsV1_2.cpp
index 5af3255..f9cecf8 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_2.cpp
@@ -16,17 +16,17 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 8c6391e..4d6bdbb 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -18,7 +18,7 @@
#include "VtsHalNeuralnetworks.h"
-#include "Callbacks.h"
+#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "ExecutionBurstServer.h"
#include "TestHarness.h"
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index a0b6d9a..78bb194 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -16,10 +16,10 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
#include "VtsHalNeuralnetworks.h"
-#include "Callbacks.h"
-
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -41,10 +41,10 @@
const Model& model) {
SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
- Return<void> ret =
- device->getSupportedOperations_1_2(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
- EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
- });
+ Return<void> ret = device->getSupportedOperations_1_2(
+ model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ });
EXPECT_TRUE(ret.isOk());
}
@@ -87,36 +87,16 @@
validatePrepareModel(device, message, model, preference);
}
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
- if (vec) {
- std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
- vec->resize(vec->size() - 1);
- }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
- // assume vec is valid
- const uint32_t index = vec->size();
- vec->resize(index + 1);
- (*vec)[index] = value;
- return index;
-}
-
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
{
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
});
}
@@ -243,7 +223,7 @@
case OperandType::TENSOR_QUANT8_ASYMM:
return {-1, 256};
case OperandType::TENSOR_QUANT8_SYMM:
- return {-129, -1, 1, 128};
+ return {-129, -1, 1, 128};
case OperandType::TENSOR_QUANT16_ASYMM:
return {-1, 65536};
case OperandType::TENSOR_QUANT16_SYMM:
@@ -256,7 +236,7 @@
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const std::vector<int32_t> invalidZeroPoints =
- getInvalidZeroPoints(model.operands[operand].type);
+ getInvalidZeroPoints(model.operands[operand].type);
for (int32_t invalidZeroPoint : invalidZeroPoints) {
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
@@ -292,13 +272,13 @@
case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_INT32:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
@@ -306,19 +286,20 @@
case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
break;
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
SymmPerChannelQuantParams channelQuant;
channelQuant.channelDim = 0;
channelQuant.scales = hidl_vec<float>(
- operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0]) : 0);
+ operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
+ : 0);
for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
channelQuant.scales[i] = 1.0f;
}
@@ -435,7 +416,7 @@
std::to_string(invalidOperationType);
validate(device, message, model, [operation, invalidOperationType](Model* model) {
model->operations[operation].type =
- static_cast<OperationType>(invalidOperationType);
+ static_cast<OperationType>(invalidOperationType);
});
}
}
@@ -690,7 +671,7 @@
static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
- "addOperationOutputTest: operation " + std::to_string(operation);
+ "addOperationOutputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -702,14 +683,14 @@
///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
static const int32_t invalidExecutionPreferences[] = {
- static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
- static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
+ static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
+ static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
for (int32_t preference : invalidExecutionPreferences) {
const std::string message =
- "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+ "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
validate(device, message, model, [](Model*) {},
static_cast<ExecutionPreference>(preference));
}
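The two hidl_vec helpers deleted from this file (and again from ValidateRequest.cpp below) carry the only non-obvious trick in these hunks: hidl_vec has no erase(), so removal is done by rotating the victim to the end and shrinking by one. Per the commit description they are consolidated into the shared utilities, which is why "1.0/Utils.h" is now included at the top of the file; a standalone sketch of those helpers, with the exact header they land in assumed from that include:

#include <hidl/HidlSupport.h>  // android::hardware::hidl_vec
#include <algorithm>           // std::rotate
#include <cstdint>

using ::android::hardware::hidl_vec;

// Remove the element at `index`: rotate it to the back, then resize down by one.
// This touches each trailing element once instead of erasing in place.
template <typename Type>
void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    if (vec) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
}

// Append `value` and return the index it was stored at (vec assumed non-null).
template <typename Type>
uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    const uint32_t index = vec->size();
    vec->resize(index + 1);
    (*vec)[index] = value;
    return index;
}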
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index e935aaa..a7e8328 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -16,17 +16,18 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "ExecutionBurstController.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -137,26 +138,6 @@
}
}
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
- if (vec) {
- std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
- vec->resize(vec->size() - 1);
- }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
- // assume vec is valid
- const uint32_t index = vec->size();
- vec->resize(index + 1);
- (*vec)[index] = value;
- return index;
-}
-
///////////////////////// REMOVE INPUT ////////////////////////////////////
static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
@@ -197,11 +178,13 @@
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = INPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
RequestArgument arg_empty = {
- .hasNoValue = true,
+ .hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -219,8 +202,10 @@
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
- .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
- .dimensions = {},
+ .location = {.poolIndex = OUTPUT,
+ .offset = 0,
+ .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 666f9b5..bd24edc 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -20,7 +20,7 @@
#include <android-base/logging.h>
-#include "Callbacks.h"
+#include "1.2/Callbacks.h"
namespace android {
namespace hardware {
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 80e810a..90dfe25 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -14,24 +14,23 @@
* limitations under the License.
*/
-#ifndef VTS_HAL_NEURALNETWORKS_V1_2_H
-#define VTS_HAL_NEURALNETWORKS_V1_2_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
-#include "Callbacks.h"
-
+#include <VtsHalHidlTargetTestBase.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
+#include <android-base/macros.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
-
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
-#include <android-base/macros.h>
#include <gtest/gtest.h>
+
#include <iostream>
#include <vector>
+#include "1.2/Callbacks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -50,7 +49,7 @@
NeuralnetworksHidlEnvironment();
~NeuralnetworksHidlEnvironment() override;
- public:
+ public:
static NeuralnetworksHidlEnvironment* getInstance();
void registerTestServices() override;
};
@@ -59,30 +58,30 @@
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
- public:
+ public:
NeuralnetworksHidlTest();
~NeuralnetworksHidlTest() override;
void SetUp() override;
void TearDown() override;
- protected:
+ protected:
sp<IDevice> device;
};
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
- protected:
- void validateEverything(const Model& model, const std::vector<Request>& requests);
- void validateFailure(const Model& model, const std::vector<Request>& requests);
+ protected:
+ void validateEverything(const Model& model, const std::vector<Request>& requests);
+ void validateFailure(const Model& model, const std::vector<Request>& requests);
- private:
- void validateModel(const Model& model);
- void validateRequests(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests);
- void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests);
- void validateBurst(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests);
+ private:
+ void validateModel(const Model& model);
+ void validateRequests(const sp<IPreparedModel>& preparedModel,
+ const std::vector<Request>& requests);
+ void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+ const std::vector<Request>& requests);
+ void validateBurst(const sp<IPreparedModel>& preparedModel,
+ const std::vector<Request>& requests);
};
// Tag for the generated tests
@@ -93,7 +92,7 @@
// Utility function to get PreparedModel from callback and downcast to V1_2.
sp<IPreparedModel> getPreparedModel_1_2(
- const sp<V1_2::implementation::PreparedModelCallback>& callback);
+ const sp<V1_2::implementation::PreparedModelCallback>& callback);
} // namespace functional
} // namespace vts
@@ -110,4 +109,4 @@
} // namespace android::hardware::neuralnetworks::V1_0
-#endif // VTS_HAL_NEURALNETWORKS_V1_2_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
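getPreparedModel_1_2() is declared here because PreparedModelCallback hands the prepared model back as the V1_0 base interface, while the 1.2 tests need the 1.2 interface (bursts, synchronous execution, output-shape reporting). An illustrative sketch of what such a helper could look like, using the HIDL-generated castFrom(); the real definition lives in VtsHalNeuralnetworks.cpp and is not part of this diff:

// Illustrative only: fetch the V1_0 handle stored by the callback and attempt
// a cast to the V1_2 interface; nullptr if the service does not implement 1.2.
sp<IPreparedModel> getPreparedModel_1_2(
        const sp<V1_2::implementation::PreparedModelCallback>& callback) {
    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
    return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
}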
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
similarity index 94%
rename from neuralnetworks/1.0/vts/functional/Callbacks.h
rename to neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
index 4707d0a..212a887 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
@@ -14,14 +14,11 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
-#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
-#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
-#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
-#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
@@ -60,7 +57,7 @@
* std::condition_variable, or std::experimental::latch instead.
*/
class CallbackBase {
- public:
+ public:
CallbackBase();
~CallbackBase();
@@ -79,8 +76,8 @@
* before the time duration expired, std::cv_status::timeout
* otherwise.
*/
- template<class Rep, class Period>
- std::cv_status wait_for(const std::chrono::duration<Rep,Period>& timeout_duration);
+ template <class Rep, class Period>
+ std::cv_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration);
/**
* CallbackBase::on_finish binds a function to the callback object. This
@@ -144,7 +141,7 @@
*/
void join_thread();
- protected:
+ protected:
/**
* CallbackBase::notify enables all prior and future wait* calls on the
* callback object to proceed. The call to CallbackBase::notify happens
@@ -158,16 +155,16 @@
*/
void notify();
- private:
+ private:
// Same as CallbackBase::join_thread but assumes we already hold a lock on
// mMutex.
void join_thread_locked();
- bool mNotified;
- std::mutex mMutex;
- std::condition_variable mCondition;
+ bool mNotified;
+ std::mutex mMutex;
+ std::condition_variable mCondition;
std::function<bool(void)> mPostWork;
- std::thread mThread;
+ std::thread mThread;
};
/**
@@ -185,7 +182,7 @@
* IDevice::prepareModel.
*/
class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback {
- public:
+ public:
PreparedModelCallback();
~PreparedModelCallback() override;
@@ -241,8 +238,8 @@
*/
sp<V1_0::IPreparedModel> getPreparedModel();
- private:
- ErrorStatus mErrorStatus;
+ private:
+ ErrorStatus mErrorStatus;
sp<V1_0::IPreparedModel> mPreparedModel;
};
@@ -260,8 +257,8 @@
* IExecutionCallback. This callback object is passed as an argument to
* IPreparedModel::execute.
*/
-class ExecutionCallback : public CallbackBase, public IExecutionCallback {
- public:
+class ExecutionCallback : public CallbackBase, public IExecutionCallback {
+ public:
ExecutionCallback();
~ExecutionCallback() override;
@@ -376,19 +373,19 @@
*/
Timing getTiming();
- private:
+ private:
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
std::vector<OutputShape> mOutputShapes = {};
Timing mTiming = {};
};
-
// template function implementation(s) below this point
-template<class Rep, class Period>
-std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep,Period>& timeout_duration) {
+template <class Rep, class Period>
+std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep, Period>& timeout_duration) {
std::unique_lock<std::mutex> lock(mMutex);
- std::cv_status status = mCondition.wait_for(lock, timeout_duration, [this]{return mNotified;});
+ std::cv_status status =
+ mCondition.wait_for(lock, timeout_duration, [this] { return mNotified; });
if (status != std::cv_status::timeout) {
join_thread_locked();
}
@@ -401,4 +398,4 @@
} // namespace hardware
} // namespace android
-#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
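For orientation, a hedged sketch of how these callbacks are typically driven from a test body; the prepareModel* call itself is elided, and the getStatus() accessor is assumed to exist alongside getPreparedModel() (it is not shown in the hunks above):

// Sketch: block until the service reports the result of an asynchronous prepare.
sp<PreparedModelCallback> callback = new PreparedModelCallback();
// ... hand `callback` to the prepareModel* overload for the HAL version under test ...
callback->wait();                                     // returns once notify*() has run
ASSERT_EQ(ErrorStatus::NONE, callback->getStatus());  // service-reported status (assumed accessor)
sp<IPreparedModel> preparedModel = getPreparedModel_1_2(callback);  // helper from VtsHalNeuralnetworks.h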