Modify 1.2 VTS tests to consume test struct directly.
Compared with v1.1, the converter for the 1.2 HIDL model additionally supports
extraParam, dynamic output shapes, and zero-sized outputs.
Modify CompilationCachingTests to use the new test struct.
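
Illustration only (not part of the change): a minimal sketch of how a test
now consumes the test struct directly, assuming a bound "device" handle and
the helpers introduced in this change:

    const test_helper::TestModel& testModel = createTestModel();
    const Model model = generated_tests::createModel(testModel);
    sp<IPreparedModel> preparedModel;
    generated_tests::PrepareModel(device, model, &preparedModel);
    generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                           /*testDynamicOutputShape=*/false);
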
Bug: 123092187
Bug: 138718240
Test: All VTS
Change-Id: I54ac97f62898e47a338b51cc6d895a0309ab001f
Merged-In: I54ac97f62898e47a338b51cc6d895a0309ab001f
(cherry picked from commit 491b0a89133b8519a68b5999cf3b227c750f6deb)
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 94ba183..5aa2751 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -25,6 +25,7 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include <algorithm>
#include <vector>
namespace android {
@@ -63,11 +64,19 @@
size_t outputSize = 0;
for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
const auto& op = testModel.operands[testModel.outputIndexes[i]];
- size_t dataSize = op.data.size();
+
+ // In the case of zero-sized output, we should at least provide a one-byte buffer.
+ // This is because zero-sized tensors are only supported internally to the driver, or
+ // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
+ // tensor as model output. Otherwise, we will have two semantic conflicts:
+ // - "Zero dimension" conflicts with "unspecified dimension".
+ // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
+ size_t bufferSize = std::max<size_t>(op.data.size(), 1);
+
DataLocation loc = {.poolIndex = kOutputPoolIndex,
.offset = static_cast<uint32_t>(outputSize),
- .length = static_cast<uint32_t>(dataSize)};
- outputSize += op.data.alignedSize();
+ .length = static_cast<uint32_t>(bufferSize)};
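+    // Advance by a full alignment unit for a zero-sized output so that its one-byte
+    // dummy buffer does not misalign the offsets of subsequent outputs.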
+ outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize();
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
}
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 301ca5d..e14430f 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -37,13 +37,12 @@
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
+ "libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
- "libneuralnetworks_generated_test_harness_headers",
- "libneuralnetworks_generated_tests",
],
test_suites: ["general-tests"],
}
@@ -75,8 +74,8 @@
srcs: [
"BasicTests.cpp",
":VtsHalNeuralNetworksV1_2_all_generated_V1_2_tests",
+ ":VtsHalNeuralNetworksV1_2_mobilenets",
"CompilationCachingTests.cpp",
- ":VtsHalNeuralNetworksV1_2_mobilenets", // CompilationCachingTests depend on MobileNets.
"ValidateBurst.cpp",
],
}
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 5907646..8747fb3 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -35,22 +35,14 @@
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2 {
+// Forward declaration of the mobilenet generated test models in
+// frameworks/ml/nn/runtime/test/generated/.
namespace generated_tests::mobilenet_224_gender_basic_fixed {
-Model createTestModel();
+const ::test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_224_gender_basic_fixed
-} // namespace android::hardware::neuralnetworks::V1_2
-
-namespace generated_tests::mobilenet_224_gender_basic_fixed {
-std::vector<test_helper::MixedTypedExample>& get_examples();
-} // namespace generated_tests::mobilenet_224_gender_basic_fixed
-
-namespace android::hardware::neuralnetworks::V1_2::generated_tests::mobilenet_quantized {
-Model createTestModel();
-} // namespace android::hardware::neuralnetworks::V1_2::generated_tests::mobilenet_quantized
namespace generated_tests::mobilenet_quantized {
-std::vector<test_helper::MixedTypedExample>& get_examples();
+const ::test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_quantized
namespace android {
@@ -60,49 +52,23 @@
namespace vts {
namespace functional {
+using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
-using ::test_helper::MixedTypedExample;
namespace float32_model {
-constexpr auto createTestModel = ::android::hardware::neuralnetworks::V1_2::generated_tests::
- mobilenet_224_gender_basic_fixed::createTestModel;
-constexpr auto get_examples = ::generated_tests::mobilenet_224_gender_basic_fixed::get_examples;
-
-// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
-// This function assumes the operation is always ADD.
-std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
- float outputValue = 1.0f + static_cast<float>(len);
- return {{.operands = {
- // Input
- {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
- // Output
- {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
-}
+constexpr auto get_test_model = ::generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
} // namespace float32_model
namespace quant8_model {
-constexpr auto createTestModel = ::android::hardware::neuralnetworks::V1_2::generated_tests::
- mobilenet_quantized::createTestModel;
-constexpr auto get_examples = ::generated_tests::mobilenet_quantized::get_examples;
-
-// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
-// This function assumes the operation is always ADD.
-std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
- uint8_t outputValue = 1 + static_cast<uint8_t>(len);
- return {{.operands = {// Input
- {.operandDimensions = {{0, {1}}}, .quant8AsymmOperands = {{0, {1}}}},
- // Output
- {.operandDimensions = {{0, {1}}},
- .quant8AsymmOperands = {{0, {outputValue}}}}}}};
-}
+constexpr auto get_test_model = ::generated_tests::mobilenet_quantized::get_test_model;
} // namespace quant8_model
@@ -155,39 +121,34 @@
// [1] [1] [1] [1]
//
// This function assumes the operation is either ADD or MUL.
-template <typename CppType, OperandType operandType>
-Model createLargeTestModelImpl(OperationType op, uint32_t len) {
- EXPECT_TRUE(op == OperationType::ADD || op == OperationType::MUL);
+template <typename CppType, TestOperandType operandType>
+TestModel createLargeTestModelImpl(TestOperationType op, uint32_t len) {
+ EXPECT_TRUE(op == TestOperationType::ADD || op == TestOperationType::MUL);
// Model operations and operands.
- std::vector<Operation> operations(len);
- std::vector<Operand> operands(len * 2 + 2);
-
- // The constant buffer pool. This contains the activation scalar, followed by the
- // per-operation constant operands.
- std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(CppType));
+ std::vector<TestOperation> operations(len);
+ std::vector<TestOperand> operands(len * 2 + 2);
// The activation scalar, value = 0.
operands[0] = {
- .type = OperandType::INT32,
+ .type = TestOperandType::INT32,
.dimensions = {},
.numberOfConsumers = len,
.scale = 0.0f,
.zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
+ .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+ .data = TestBuffer::createFromVector<int32_t>({0}),
};
- memset(operandValues.data(), 0, sizeof(int32_t));
// The buffer value of the constant second operand. The logical value is always 1.0f.
CppType bufferValue;
// The scale of the first and second operand.
float scale1, scale2;
- if (operandType == OperandType::TENSOR_FLOAT32) {
+ if (operandType == TestOperandType::TENSOR_FLOAT32) {
bufferValue = 1.0f;
scale1 = 0.0f;
scale2 = 0.0f;
- } else if (op == OperationType::ADD) {
+ } else if (op == TestOperationType::ADD) {
bufferValue = 1;
scale1 = 1.0f;
scale2 = 1.0f;
@@ -211,9 +172,9 @@
.numberOfConsumers = 1,
.scale = scale1,
.zeroPoint = 0,
- .lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
- : OperandLifeTime::TEMPORARY_VARIABLE),
- .location = {},
+ .lifetime = (i == 0 ? TestOperandLifeTime::MODEL_INPUT
+ : TestOperandLifeTime::TEMPORARY_VARIABLE),
+ .data = (i == 0 ? TestBuffer::createFromVector<CppType>({1}) : TestBuffer()),
};
// The second operation input, value = 1.
@@ -223,13 +184,9 @@
.numberOfConsumers = 1,
.scale = scale2,
.zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0,
- .offset = static_cast<uint32_t>(i * sizeof(CppType) + sizeof(int32_t)),
- .length = sizeof(CppType)},
+ .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+ .data = TestBuffer::createFromVector<CppType>({bufferValue}),
};
- memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(CppType), &bufferValue,
- sizeof(CppType));
// The operation. All operations share the same activation scalar.
// The output operand is created as an input in the next iteration of the loop, in the case
@@ -242,6 +199,10 @@
};
}
+ // For TestOperationType::ADD, output = 1 + 1 * len = len + 1
+    // For TestOperationType::MUL, output = 1 * 1^len = 1
+ CppType outputResult = static_cast<CppType>(op == TestOperationType::ADD ? len + 1u : 1u);
+
// The model output.
operands.back() = {
.type = operandType,
@@ -249,21 +210,16 @@
.numberOfConsumers = 0,
.scale = scale1,
.zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {},
+ .lifetime = TestOperandLifeTime::MODEL_OUTPUT,
+ .data = TestBuffer::createFromVector<CppType>({outputResult}),
};
- const std::vector<uint32_t> inputIndexes = {1};
- const std::vector<uint32_t> outputIndexes = {len * 2 + 1};
- const std::vector<hidl_memory> pools = {};
-
return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
+ .operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = {1},
+ .outputIndexes = {len * 2 + 1},
+ .isRelaxed = false,
};
}
@@ -332,35 +288,21 @@
// Model and examples creators. According to kOperandType, the following methods will return
// either float32 model/examples or the quant8 variant.
- Model createTestModel() {
+ TestModel createTestModel() {
if (kOperandType == OperandType::TENSOR_FLOAT32) {
- return float32_model::createTestModel();
+ return float32_model::get_test_model();
} else {
- return quant8_model::createTestModel();
+ return quant8_model::get_test_model();
}
}
- std::vector<MixedTypedExample> get_examples() {
+ TestModel createLargeTestModel(OperationType op, uint32_t len) {
if (kOperandType == OperandType::TENSOR_FLOAT32) {
- return float32_model::get_examples();
+ return createLargeTestModelImpl<float, TestOperandType::TENSOR_FLOAT32>(
+ static_cast<TestOperationType>(op), len);
} else {
- return quant8_model::get_examples();
- }
- }
-
- Model createLargeTestModel(OperationType op, uint32_t len) {
- if (kOperandType == OperandType::TENSOR_FLOAT32) {
- return createLargeTestModelImpl<float, OperandType::TENSOR_FLOAT32>(op, len);
- } else {
- return createLargeTestModelImpl<uint8_t, OperandType::TENSOR_QUANT8_ASYMM>(op, len);
- }
- }
-
- std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
- if (kOperandType == OperandType::TENSOR_FLOAT32) {
- return float32_model::getLargeModelExamples(len);
- } else {
- return quant8_model::getLargeModelExamples(len);
+ return createLargeTestModelImpl<uint8_t, TestOperandType::TENSOR_QUANT8_ASYMM>(
+ static_cast<TestOperationType>(op), len);
}
}
@@ -482,8 +424,9 @@
TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
sp<IPreparedModel> preparedModel = nullptr;
// Save the compilation to cache.
@@ -491,7 +434,7 @@
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModel, modelCache, dataCache);
+ saveModelToCache(model, modelCache, dataCache);
}
// Retrieve preparedModel from cache.
@@ -516,15 +459,15 @@
}
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
}
TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
sp<IPreparedModel> preparedModel = nullptr;
// Save the compilation to cache.
@@ -545,7 +488,7 @@
write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
sizeof(dummyBytes));
}
- saveModelToCache(testModel, modelCache, dataCache);
+ saveModelToCache(model, modelCache, dataCache);
}
// Retrieve preparedModel from cache.
@@ -579,15 +522,15 @@
}
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
}
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
// Test with number of model cache files greater than mNumModelCache.
{
@@ -598,12 +541,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache.pop_back();
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -625,12 +566,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache.push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -651,12 +590,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache.pop_back();
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -678,12 +615,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache.push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -698,15 +633,16 @@
TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
// Save the compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModel, modelCache, dataCache);
+ saveModelToCache(model, modelCache, dataCache);
}
// Test with number of model cache files greater than mNumModelCache.
@@ -778,8 +714,9 @@
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
// Go through each handle in model cache, test with NumFd greater than 1.
for (uint32_t i = 0; i < mNumModelCache; i++) {
@@ -790,12 +727,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache[i].pop_back();
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -817,12 +752,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache[i].push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -843,12 +776,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache[i].pop_back();
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -870,12 +801,10 @@
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache[i].push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -890,15 +819,16 @@
TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
// Save the compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModel, modelCache, dataCache);
+ saveModelToCache(model, modelCache, dataCache);
}
// Go through each handle in model cache, test with NumFd greater than 1.
@@ -970,8 +900,9 @@
TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
@@ -983,12 +914,10 @@
createCacheHandles(mDataCache, dataCacheMode, &dataCache);
modelCacheMode[i] = AccessMode::READ_WRITE;
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -1008,12 +937,10 @@
createCacheHandles(mDataCache, dataCacheMode, &dataCache);
dataCacheMode[i] = AccessMode::READ_WRITE;
sp<IPreparedModel> preparedModel = nullptr;
- saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
- get_examples(),
- testModel.relaxComputationFloat32toFloat16,
+ generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -1028,8 +955,9 @@
TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
// Create test HIDL model and compile.
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
@@ -1038,7 +966,7 @@
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModel, modelCache, dataCache);
+ saveModelToCache(model, modelCache, dataCache);
}
// Go through each handle in model cache, test with invalid access mode.
@@ -1106,12 +1034,14 @@
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
- const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
- if (checkEarlyTermination(testModelMul)) return;
- const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
- if (checkEarlyTermination(testModelAdd)) return;
+ const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+ const Model modelMul = generated_tests::createModel(testModelMul);
+ if (checkEarlyTermination(modelMul)) return;
+ const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+ const Model modelAdd = generated_tests::createModel(testModelAdd);
+ if (checkEarlyTermination(modelAdd)) return;
- // Save the testModelMul compilation to cache.
+ // Save the modelMul compilation to cache.
auto modelCacheMul = mModelCache;
for (auto& cache : modelCacheMul) {
cache[0].append("_mul");
@@ -1120,15 +1050,15 @@
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModelMul, modelCache, dataCache);
+ saveModelToCache(modelMul, modelCache, dataCache);
}
- // Use a different token for testModelAdd.
+ // Use a different token for modelAdd.
mToken[0]++;
// This test is probabilistic, so we run it multiple times.
for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
- // Save the testModelAdd compilation to cache.
+ // Save the modelAdd compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
@@ -1136,7 +1066,7 @@
// Spawn a thread to copy the cache content concurrently while saving to cache.
std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
- saveModelToCache(testModelAdd, modelCache, dataCache);
+ saveModelToCache(modelAdd, modelCache, dataCache);
thread.join();
}
@@ -1155,11 +1085,8 @@
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
- generated_tests::EvaluatePreparedModel(
- preparedModel, [](int) { return false; },
- getLargeModelExamples(kLargeModelSize),
- testModelAdd.relaxComputationFloat32toFloat16,
- /*testDynamicOutputShape=*/false);
+ generated_tests::EvaluatePreparedModel(preparedModel, testModelAdd,
+ /*testDynamicOutputShape=*/false);
}
}
}
@@ -1169,12 +1096,14 @@
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
- const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
- if (checkEarlyTermination(testModelMul)) return;
- const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
- if (checkEarlyTermination(testModelAdd)) return;
+ const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+ const Model modelMul = generated_tests::createModel(testModelMul);
+ if (checkEarlyTermination(modelMul)) return;
+ const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+ const Model modelAdd = generated_tests::createModel(testModelAdd);
+ if (checkEarlyTermination(modelAdd)) return;
- // Save the testModelMul compilation to cache.
+ // Save the modelMul compilation to cache.
auto modelCacheMul = mModelCache;
for (auto& cache : modelCacheMul) {
cache[0].append("_mul");
@@ -1183,20 +1112,20 @@
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModelMul, modelCache, dataCache);
+ saveModelToCache(modelMul, modelCache, dataCache);
}
- // Use a different token for testModelAdd.
+ // Use a different token for modelAdd.
mToken[0]++;
// This test is probabilistic, so we run it multiple times.
for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
- // Save the testModelAdd compilation to cache.
+ // Save the modelAdd compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModelAdd, modelCache, dataCache);
+ saveModelToCache(modelAdd, modelCache, dataCache);
}
// Retrieve preparedModel from cache.
@@ -1218,11 +1147,8 @@
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
- generated_tests::EvaluatePreparedModel(
- preparedModel, [](int) { return false; },
- getLargeModelExamples(kLargeModelSize),
- testModelAdd.relaxComputationFloat32toFloat16,
- /*testDynamicOutputShape=*/false);
+ generated_tests::EvaluatePreparedModel(preparedModel, testModelAdd,
+ /*testDynamicOutputShape=*/false);
}
}
}
@@ -1232,12 +1158,14 @@
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
- const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
- if (checkEarlyTermination(testModelMul)) return;
- const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
- if (checkEarlyTermination(testModelAdd)) return;
+ const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+ const Model modelMul = generated_tests::createModel(testModelMul);
+ if (checkEarlyTermination(modelMul)) return;
+ const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+ const Model modelAdd = generated_tests::createModel(testModelAdd);
+ if (checkEarlyTermination(modelAdd)) return;
- // Save the testModelMul compilation to cache.
+ // Save the modelMul compilation to cache.
auto modelCacheMul = mModelCache;
for (auto& cache : modelCacheMul) {
cache[0].append("_mul");
@@ -1246,21 +1174,21 @@
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModelMul, modelCache, dataCache);
+ saveModelToCache(modelMul, modelCache, dataCache);
}
- // Use a different token for testModelAdd.
+ // Use a different token for modelAdd.
mToken[0]++;
- // Save the testModelAdd compilation to cache.
+ // Save the modelAdd compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModelAdd, modelCache, dataCache);
+ saveModelToCache(modelAdd, modelCache, dataCache);
}
- // Replace the model cache of testModelAdd with testModelMul.
+ // Replace the model cache of modelAdd with modelMul.
copyCacheFiles(modelCacheMul, mModelCache);
// Retrieve the preparedModel from cache, expect failure.
@@ -1336,15 +1264,16 @@
// The modifier accepts one pointer argument "skip" as the returning value, indicating
// whether the test should be skipped or not.
void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
- const Model testModel = createTestModel();
- if (checkEarlyTermination(testModel)) return;
+ const TestModel& testModel = createTestModel();
+ const Model model = generated_tests::createModel(testModel);
+ if (checkEarlyTermination(model)) return;
// Save the compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
- saveModelToCache(testModel, modelCache, dataCache);
+ saveModelToCache(model, modelCache, dataCache);
}
bool skip = false;
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 82cc73d..1dcebbe 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -31,7 +31,10 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include <gtest/gtest.h>
+#include <algorithm>
#include <iostream>
+#include <numeric>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
@@ -46,7 +49,10 @@
namespace V1_2 {
namespace generated_tests {
+using namespace test_helper;
+using ::android::hardware::neuralnetworks::V1_0::DataLocation;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
@@ -60,29 +66,122 @@
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
-using ::test_helper::compare;
-using ::test_helper::expectMultinomialDistributionWithinTolerance;
-using ::test_helper::filter;
-using ::test_helper::for_all;
-using ::test_helper::for_each;
-using ::test_helper::MixedTyped;
-using ::test_helper::MixedTypedExample;
-using ::test_helper::resize_accordingly;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
-static bool isZeroSized(const MixedTyped& example, uint32_t index) {
- for (auto i : example.operandDimensions.at(index)) {
- if (i == 0) return true;
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+
+Model createModel(const TestModel& testModel) {
+ // Model operands.
+ hidl_vec<Operand> operands(testModel.operands.size());
+ size_t constCopySize = 0, constRefSize = 0;
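+    // First pass over the operands: compute each constant's DataLocation while sizing
+    // the CONSTANT_COPY blob (operandValues) and the CONSTANT_REFERENCE memory pool.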
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+
+ DataLocation loc = {};
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+ loc = {.poolIndex = 0,
+ .offset = static_cast<uint32_t>(constCopySize),
+ .length = static_cast<uint32_t>(op.data.size())};
+ constCopySize += op.data.alignedSize();
+ } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+ loc = {.poolIndex = 0,
+ .offset = static_cast<uint32_t>(constRefSize),
+ .length = static_cast<uint32_t>(op.data.size())};
+ constRefSize += op.data.alignedSize();
+ }
+
+ Operand::ExtraParams extraParams;
+ if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+ extraParams.channelQuant(SymmPerChannelQuantParams{
+ .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
+ }
+
+ operands[i] = {.type = static_cast<OperandType>(op.type),
+ .dimensions = op.dimensions,
+ .numberOfConsumers = op.numberOfConsumers,
+ .scale = op.scale,
+ .zeroPoint = op.zeroPoint,
+ .lifetime = static_cast<OperandLifeTime>(op.lifetime),
+ .location = loc,
+ .extraParams = std::move(extraParams)};
}
- return false;
+
+ // Model operations.
+ hidl_vec<Operation> operations(testModel.operations.size());
+ std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
+ [](const TestOperation& op) -> Operation {
+ return {.type = static_cast<OperationType>(op.type),
+ .inputs = op.inputs,
+ .outputs = op.outputs};
+ });
+
+ // Constant copies.
+ hidl_vec<uint8_t> operandValues(constCopySize);
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+ const uint8_t* begin = op.data.get<uint8_t>();
+ const uint8_t* end = begin + op.data.size();
+ std::copy(begin, end, operandValues.data() + operands[i].location.offset);
+ }
+ }
+
+ // Shared memory.
+ hidl_vec<hidl_memory> pools = {};
+ if (constRefSize > 0) {
+ hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+ CHECK_NE(pools[0].size(), 0u);
+
+ // load data
+ sp<IMemory> mappedMemory = mapMemory(pools[0]);
+ CHECK(mappedMemory.get() != nullptr);
+ uint8_t* mappedPtr =
+ reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+ CHECK(mappedPtr != nullptr);
+
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+ const uint8_t* begin = op.data.get<uint8_t>();
+ const uint8_t* end = begin + op.data.size();
+ std::copy(begin, end, mappedPtr + operands[i].location.offset);
+ }
+ }
+ }
+
+ return {.operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = testModel.inputIndexes,
+ .outputIndexes = testModel.outputIndexes,
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
+ .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}
-static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
+ const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
+ return byteSize > 1u;
+}
+
+static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
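+    // Shrink the selected output buffer by one byte; a conforming driver should then
+    // report ErrorStatus::OUTPUT_INSUFFICIENT_SIZE along with the actual output shapes.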
+ auto& length = request->outputs[outputIndex].location.length;
+ ASSERT_GT(length, 1u);
+ length -= 1u;
+}
+
+static void makeOutputDimensionsUnspecified(Model* model) {
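+    // A dimension value of 0 marks that extent as unspecified, exercising the
+    // dynamic output shape path in the driver.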
+ for (auto i : model->outputIndexes) {
+ auto& dims = model->operands[i].dimensions;
+ std::fill(dims.begin(), dims.end(), 0);
+ }
+}
+
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
return preparedModel->execute_1_2(request, measure, callback);
}
-static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
hidl_vec<OutputShape>* outputShapes,
Timing* timing) {
@@ -105,294 +204,168 @@
return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
}
enum class Executor { ASYNC, SYNC, BURST };
-enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
-const float kDefaultAtol = 1e-5f;
-const float kDefaultRtol = 1e-5f;
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
Executor executor, MeasureTiming measure, OutputType outputType) {
- const uint32_t INPUT = 0;
- const uint32_t OUTPUT = 1;
+    // If output0 is not larger than one byte, we cannot test with an insufficient buffer.
+ if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+ return;
+ }
- int example_no = 1;
- for (auto& example : examples) {
- SCOPED_TRACE(example_no++);
- const MixedTyped& inputs = example.operands.first;
- const MixedTyped& golden = example.operands.second;
+ Request request = createRequest(testModel);
+ if (outputType == OutputType::INSUFFICIENT) {
+ makeOutputInsufficientSize(/*outputIndex=*/0, &request);
+ }
- const bool hasFloat16Inputs = !inputs.float16Operands.empty();
- if (hasRelaxedFloat32Model || hasFloat16Inputs) {
- // TODO: Adjust the error limit based on testing.
- // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
- fpAtol = 5.0f * 0.0009765625f;
- // Set the relative tolerance to be 5ULP of the corresponding FP precision.
- fpRtol = 5.0f * 0.0009765625f;
+ ErrorStatus executionStatus;
+ hidl_vec<OutputShape> outputShapes;
+ Timing timing;
+ switch (executor) {
+ case Executor::ASYNC: {
+ SCOPED_TRACE("asynchronous");
+
+ // launch execution
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ Return<ErrorStatus> executionLaunchStatus =
+ ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ ASSERT_TRUE(executionLaunchStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+ // retrieve execution status
+ executionCallback->wait();
+ executionStatus = executionCallback->getStatus();
+ outputShapes = executionCallback->getOutputShapes();
+ timing = executionCallback->getTiming();
+
+ break;
}
+ case Executor::SYNC: {
+ SCOPED_TRACE("synchronous");
- std::vector<RequestArgument> inputs_info, outputs_info;
- uint32_t inputSize = 0, outputSize = 0;
- // This function only partially specifies the metadata (vector of RequestArguments).
- // The contents are copied over below.
- for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
- if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
- RequestArgument arg = {
- .location = {.poolIndex = INPUT,
- .offset = 0,
- .length = static_cast<uint32_t>(s)},
- .dimensions = {},
- };
- RequestArgument arg_empty = {
- .hasNoValue = true,
- };
- inputs_info[index] = s ? arg : arg_empty;
- inputSize += s;
- });
- // Compute offset for inputs 1 and so on
- {
- size_t offset = 0;
- for (auto& i : inputs_info) {
- if (!i.hasNoValue) i.location.offset = offset;
- offset += i.location.length;
+ // execute
+ Return<ErrorStatus> executionReturnStatus =
+ ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+ ASSERT_TRUE(executionReturnStatus.isOk());
+ executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+ break;
+ }
+ case Executor::BURST: {
+ SCOPED_TRACE("burst");
+
+ // create burst
+ const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
+ CreateBurst(preparedModel);
+ ASSERT_NE(nullptr, controller.get());
+
+ // create memory keys
+ std::vector<intptr_t> keys(request.pools.size());
+ for (size_t i = 0; i < keys.size(); ++i) {
+ keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
- }
- MixedTyped test; // holding test results
+ // execute burst
+ std::tie(executionStatus, outputShapes, timing) =
+ controller->compute(request, measure, keys);
- // Go through all outputs, initialize RequestArgument descriptors
- resize_accordingly(golden, test);
- bool sizeLargerThanOne = true;
- for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
- int index, auto, auto s) {
- if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
- if (index == 0) {
- // On OutputType::INSUFFICIENT, set the output operand with index 0 with
- // buffer size one byte less than needed.
- if (outputType == OutputType::INSUFFICIENT) {
- if (s > 1 && !isZeroSized(golden, index)) {
- s -= 1;
- } else {
- sizeLargerThanOne = false;
- }
- }
- }
- RequestArgument arg = {
- .location = {.poolIndex = OUTPUT,
- .offset = 0,
- .length = static_cast<uint32_t>(s)},
- .dimensions = {},
- };
- outputs_info[index] = arg;
- outputSize += s;
- });
- // If output0 does not have size larger than one byte,
- // we can not provide an insufficient buffer
- if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
- // Compute offset for outputs 1 and so on
- {
- size_t offset = 0;
- for (auto& i : outputs_info) {
- i.location.offset = offset;
- offset += i.location.length;
- }
- }
- std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
- nn::allocateSharedMemory(outputSize)};
- ASSERT_NE(0ull, pools[INPUT].size());
- ASSERT_NE(0ull, pools[OUTPUT].size());
-
- // load data
- sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
- sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
- ASSERT_NE(nullptr, inputMemory.get());
- ASSERT_NE(nullptr, outputMemory.get());
- char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
- char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
- ASSERT_NE(nullptr, inputPtr);
- ASSERT_NE(nullptr, outputPtr);
- inputMemory->update();
- outputMemory->update();
-
- // Go through all inputs, copy the values
- for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
- char* begin = (char*)p;
- char* end = begin + s;
- // TODO: handle more than one input
- std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
- });
-
- inputMemory->commit();
- outputMemory->commit();
-
- const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
-
- ErrorStatus executionStatus;
- hidl_vec<OutputShape> outputShapes;
- Timing timing;
- switch (executor) {
- case Executor::ASYNC: {
- SCOPED_TRACE("asynchronous");
-
- // launch execution
- sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- ASSERT_NE(nullptr, executionCallback.get());
- Return<ErrorStatus> executionLaunchStatus =
- ExecutePreparedModel(preparedModel, request, measure, executionCallback);
- ASSERT_TRUE(executionLaunchStatus.isOk());
- EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
-
- // retrieve execution status
- executionCallback->wait();
- executionStatus = executionCallback->getStatus();
- outputShapes = executionCallback->getOutputShapes();
- timing = executionCallback->getTiming();
-
- break;
- }
- case Executor::SYNC: {
- SCOPED_TRACE("synchronous");
-
- // execute
- Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
- preparedModel, request, measure, &outputShapes, &timing);
- ASSERT_TRUE(executionReturnStatus.isOk());
- executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
-
- break;
- }
- case Executor::BURST: {
- SCOPED_TRACE("burst");
-
- // create burst
- const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
- CreateBurst(preparedModel);
- ASSERT_NE(nullptr, controller.get());
-
- // create memory keys
- std::vector<intptr_t> keys(request.pools.size());
- for (size_t i = 0; i < keys.size(); ++i) {
- keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
- }
-
- // execute burst
- std::tie(executionStatus, outputShapes, timing) =
- controller->compute(request, measure, keys);
-
- break;
- }
- }
-
- if (outputType != OutputType::FULLY_SPECIFIED &&
- executionStatus == ErrorStatus::GENERAL_FAILURE) {
- LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
- "execute model that it does not support.";
- std::cout << "[ ] Early termination of test because vendor service cannot "
- "execute model that it does not support."
- << std::endl;
- GTEST_SKIP();
- }
- if (measure == MeasureTiming::NO) {
- EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
- EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
- } else {
- if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
- EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
- }
- }
-
- switch (outputType) {
- case OutputType::FULLY_SPECIFIED:
- // If the model output operands are fully specified, outputShapes must be either
- // either empty, or have the same number of elements as the number of outputs.
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
- ASSERT_TRUE(outputShapes.size() == 0 ||
- outputShapes.size() == test.operandDimensions.size());
- break;
- case OutputType::UNSPECIFIED:
- // If the model output operands are not fully specified, outputShapes must have
- // the same number of elements as the number of outputs.
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
- ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
- break;
- case OutputType::INSUFFICIENT:
- ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
- ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
- ASSERT_FALSE(outputShapes[0].isSufficient);
- return;
- }
- // Go through all outputs, overwrite output dimensions with returned output shapes
- if (outputShapes.size() > 0) {
- for_each<uint32_t>(test.operandDimensions,
- [&outputShapes](int idx, std::vector<uint32_t>& dim) {
- dim = outputShapes[idx].dimensions;
- });
- }
-
- // validate results
- outputMemory->read();
- copy_back(&test, outputs_info, outputPtr);
- outputMemory->commit();
- // Filter out don't cares
- MixedTyped filtered_golden = filter(golden, is_ignored);
- MixedTyped filtered_test = filter(test, is_ignored);
-
- // We want "close-enough" results for float
- compare(filtered_golden, filtered_test, fpAtol, fpRtol);
-
- if (example.expectedMultinomialDistributionTolerance > 0) {
- expectMultinomialDistributionWithinTolerance(test, example);
+ break;
}
}
-}
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
- OutputType outputType) {
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
- kDefaultRtol, executor, measure, outputType);
+
+ if (outputType != OutputType::FULLY_SPECIFIED &&
+ executionStatus == ErrorStatus::GENERAL_FAILURE) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "execute model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ if (measure == MeasureTiming::NO) {
+ EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+ EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+ } else {
+ if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+ EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+ }
+ }
+
+ switch (outputType) {
+ case OutputType::FULLY_SPECIFIED:
+            // If the model output operands are fully specified, outputShapes must be
+            // either empty or have the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_TRUE(outputShapes.size() == 0 ||
+ outputShapes.size() == testModel.outputIndexes.size());
+ break;
+ case OutputType::UNSPECIFIED:
+ // If the model output operands are not fully specified, outputShapes must have
+ // the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
+ break;
+ case OutputType::INSUFFICIENT:
+ ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
+ ASSERT_FALSE(outputShapes[0].isSufficient);
+ return;
+ }
+
+ // Go through all outputs, check returned output shapes.
+ for (uint32_t i = 0; i < outputShapes.size(); i++) {
+ EXPECT_TRUE(outputShapes[i].isSufficient);
+ const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
+ const std::vector<uint32_t> actual = outputShapes[i].dimensions;
+ EXPECT_EQ(expect, actual);
+ }
+
+ // Retrieve execution results.
+ const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+
+ // We want "close-enough" results.
+ checkResults(testModel, outputs);
}
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+ bool testDynamicOutputShape) {
if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+ OutputType::INSUFFICIENT);
} else {
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
- Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+ OutputType::FULLY_SPECIFIED);
}
}
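
The branch above enumerates every (Executor, MeasureTiming, OutputType) combination by hand. For reference, a minimal sketch of the same coverage driven by loops — the helper and enum names are the ones used in this file, but the loop form itself is an assumption, not part of this change:

// Sketch only: same test matrix as the hand-enumerated calls above. The set
// of output types depends on testDynamicOutputShape, as in the surrounding
// code.
const std::vector<OutputType> outputTypes =
        testDynamicOutputShape
                ? std::vector<OutputType>{OutputType::UNSPECIFIED, OutputType::INSUFFICIENT}
                : std::vector<OutputType>{OutputType::FULLY_SPECIFIED};
for (OutputType outputType : outputTypes) {
    for (Executor executor : {Executor::ASYNC, Executor::SYNC, Executor::BURST}) {
        for (MeasureTiming measure : {MeasureTiming::NO, MeasureTiming::YES}) {
            EvaluatePreparedModel(preparedModel, testModel, executor, measure, outputType);
        }
    }
}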
@@ -411,7 +384,6 @@
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
@@ -438,17 +410,18 @@
ASSERT_NE(nullptr, preparedModel->get());
}
-void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
- bool testDynamicOutputShape) {
- Model model = create_model();
+void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+ Model model = createModel(testModel);
+ if (testDynamicOutputShape) {
+ makeOutputDimensionsUnspecified(&model);
+ }
+
sp<IPreparedModel> preparedModel = nullptr;
PrepareModel(device, model, &preparedModel);
if (preparedModel == nullptr) {
GTEST_SKIP();
}
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
+ EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
}
} // namespace generated_tests
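
Execute() above relies on makeOutputDimensionsUnspecified, which is defined elsewhere in the harness. Since the HIDL model encodes an unspecified dimension as 0, a plausible implementation looks like the following sketch (an illustration, not the actual helper):

#include <algorithm>

// Hypothetical sketch: mark every output dimension as unspecified, which the
// HIDL model represents with the value 0.
static void makeOutputDimensionsUnspecified(Model* model) {
    for (uint32_t index : model->outputIndexes) {
        auto& dims = model->operands[index].dimensions;
        std::fill(dims.begin(), dims.end(), 0);
    }
}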
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
index 0ecbe7e..de45242 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -30,18 +30,15 @@
namespace V1_2 {
namespace generated_tests {
-using ::test_helper::MixedTypedExample;
+Model createModel(const ::test_helper::TestModel& testModel);
void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
sp<V1_2::IPreparedModel>* preparedModel);
-void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
- std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples,
- bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
+void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
+ const ::test_helper::TestModel& testModel, bool testDynamicOutputShape);
-void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+void Execute(const sp<V1_2::IDevice>& device, const ::test_helper::TestModel& testModel,
bool testDynamicOutputShape = false);
} // namespace generated_tests
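
With this header, a generated test body needs only the TestModel accessor and Execute(). A sketch of the intended call pattern — get_test_model() matches the forward declarations used by this change, while the fixture wrapper and model name are illustrative:

// Illustrative call pattern for the new harness API; some_model is a
// hypothetical generated namespace.
TEST_F(NeuralnetworksHidlTest, SomeGeneratedModel) {
    const test_helper::TestModel& testModel =
            generated_tests::some_model::get_test_model();
    generated_tests::Execute(device, testModel, /*testDynamicOutputShape=*/false);
}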
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.h b/neuralnetworks/1.2/vts/functional/GeneratedTests.h
index 9842036..a723609 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.h
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.h
@@ -14,21 +14,11 @@
* limitations under the License.
*/
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
+#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
-#include "MemoryUtils.h"
#include "TestHarness.h"
-#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
-
-std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
-
-} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
-
namespace android::hardware::neuralnetworks::V1_2::generated_tests {
using namespace ::android::hardware::neuralnetworks::V1_2::vts::functional;
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 06103bc..816f861 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -238,7 +238,7 @@
///////////////////////// BURST VALIDATION TESTS ////////////////////////////////////
static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests) {
+ const Request& request) {
// create burst
std::unique_ptr<RequestChannelSender> sender;
std::unique_ptr<ResultChannelReceiver> receiver;
@@ -249,35 +249,32 @@
ASSERT_NE(nullptr, receiver.get());
ASSERT_NE(nullptr, context.get());
- // validate each request
- for (const Request& request : requests) {
- // load memory into callback slots
- std::vector<intptr_t> keys;
- keys.reserve(request.pools.size());
- std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
- [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
- const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+ // load memory into callback slots
+ std::vector<intptr_t> keys;
+ keys.reserve(request.pools.size());
+ std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+ [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+ const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
- // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
- // subsequent slot validation testing)
- ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
- return slot != std::numeric_limits<int32_t>::max();
- }));
+ // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
+ // subsequent slot validation testing)
+ ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+ return slot != std::numeric_limits<int32_t>::max();
+ }));
- // serialize the request
- const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
+ // serialize the request
+ const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
- // validations
- removeDatumTest(sender.get(), receiver.get(), serialized);
- addDatumTest(sender.get(), receiver.get(), serialized);
- mutateDatumTest(sender.get(), receiver.get(), serialized);
- }
+ // validations
+ removeDatumTest(sender.get(), receiver.get(), serialized);
+ addDatumTest(sender.get(), receiver.get(), serialized);
+ mutateDatumTest(sender.get(), receiver.get(), serialized);
}
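
removeDatumTest, addDatumTest, and mutateDatumTest are defined earlier in this file; each corrupts the serialized request and expects the burst to reject it. A hedged sketch of the removal variant's shape (an illustration of the idea, not the helper's exact body):

// Sketch of the removeDatumTest idea: for each position in the serialized
// request, drop one datum, send the corrupted message, and expect failure.
for (size_t i = 0; i < serialized.size(); ++i) {
    std::vector<FmqRequestDatum> corrupted = serialized;
    corrupted.erase(corrupted.begin() + i);
    // send `corrupted` via the RequestChannelSender and expect the
    // ResultChannelReceiver to report ErrorStatus != NONE.
}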
// This test validates that when the Result message size exceeds the length of
// the result FMQ, the service instance gracefully fails and returns an error.
static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests) {
+ const Request& request) {
// create regular burst
std::shared_ptr<ExecutionBurstController> controllerRegular;
ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
@@ -290,43 +287,40 @@
preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
ASSERT_NE(nullptr, controllerSmall.get());
- // validate each request
- for (const Request& request : requests) {
- // load memory into callback slots
- std::vector<intptr_t> keys(request.pools.size());
- for (size_t i = 0; i < keys.size(); ++i) {
- keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
- }
-
- // collect serialized result by running regular burst
- const auto [statusRegular, outputShapesRegular, timingRegular] =
- controllerRegular->compute(request, MeasureTiming::NO, keys);
-
- // skip test if regular burst output isn't useful for testing a failure
- // caused by having too small of a length for the result FMQ
- const std::vector<FmqResultDatum> serialized =
- ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
- if (statusRegular != ErrorStatus::NONE ||
- serialized.size() <= kExecutionBurstChannelSmallLength) {
- continue;
- }
-
- // by this point, execution should fail because the result channel isn't
- // large enough to return the serialized result
- const auto [statusSmall, outputShapesSmall, timingSmall] =
- controllerSmall->compute(request, MeasureTiming::NO, keys);
- EXPECT_NE(ErrorStatus::NONE, statusSmall);
- EXPECT_EQ(0u, outputShapesSmall.size());
- EXPECT_TRUE(badTiming(timingSmall));
+ // load memory into callback slots
+ std::vector<intptr_t> keys(request.pools.size());
+ for (size_t i = 0; i < keys.size(); ++i) {
+ keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
+
+ // collect serialized result by running regular burst
+ const auto [statusRegular, outputShapesRegular, timingRegular] =
+ controllerRegular->compute(request, MeasureTiming::NO, keys);
+
+ // skip test if regular burst output isn't useful for testing a failure
+ // caused by having too small of a length for the result FMQ
+ const std::vector<FmqResultDatum> serialized =
+ ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+ if (statusRegular != ErrorStatus::NONE ||
+ serialized.size() <= kExecutionBurstChannelSmallLength) {
+ return;
+ }
+
+ // by this point, execution should fail because the result channel isn't
+ // large enough to return the serialized result
+ const auto [statusSmall, outputShapesSmall, timingSmall] =
+ controllerSmall->compute(request, MeasureTiming::NO, keys);
+ EXPECT_NE(ErrorStatus::NONE, statusSmall);
+ EXPECT_EQ(0u, outputShapesSmall.size());
+ EXPECT_TRUE(badTiming(timingSmall));
}
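
badTiming, used in the expectations above, encodes the VTS convention that a failed execution must report no timing measurements. A sketch consistent with that convention (the real helper is declared in the shared test utilities):

#include <limits>

// Sketch of the badTiming convention: on failure, both timing fields must be
// UINT64_MAX, i.e. "no measurement available".
static bool badTiming(const Timing& timing) {
    return timing.timeOnDevice == std::numeric_limits<uint64_t>::max() &&
           timing.timeInDriver == std::numeric_limits<uint64_t>::max();
}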
///////////////////////////// ENTRY POINT //////////////////////////////////
void ValidationTest::validateBurst(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests) {
- ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, requests));
- ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, requests));
+ const Request& request) {
+ ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, request));
+ ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request));
}
} // namespace functional
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index cf5905f..13d45e4 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -16,14 +16,9 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
-#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
@@ -35,12 +30,7 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hidl::memory::V1_0::IMemory;
-using test_helper::for_all;
-using test_helper::MixedTyped;
-using test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -161,119 +151,23 @@
///////////////////////////// ENTRY POINT //////////////////////////////////
-std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
- const uint32_t INPUT = 0;
- const uint32_t OUTPUT = 1;
-
- std::vector<Request> requests;
-
- for (auto& example : examples) {
- const MixedTyped& inputs = example.operands.first;
- const MixedTyped& outputs = example.operands.second;
-
- std::vector<RequestArgument> inputs_info, outputs_info;
- uint32_t inputSize = 0, outputSize = 0;
-
- // This function only partially specifies the metadata (vector of RequestArguments).
- // The contents are copied over below.
- for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
- if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
- RequestArgument arg = {
- .location = {.poolIndex = INPUT,
- .offset = 0,
- .length = static_cast<uint32_t>(s)},
- .dimensions = {},
- };
- RequestArgument arg_empty = {
- .hasNoValue = true,
- };
- inputs_info[index] = s ? arg : arg_empty;
- inputSize += s;
- });
- // Compute offset for inputs 1 and so on
- {
- size_t offset = 0;
- for (auto& i : inputs_info) {
- if (!i.hasNoValue) i.location.offset = offset;
- offset += i.location.length;
- }
- }
-
- // Go through all outputs, initialize RequestArgument descriptors
- for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
- if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
- RequestArgument arg = {
- .location = {.poolIndex = OUTPUT,
- .offset = 0,
- .length = static_cast<uint32_t>(s)},
- .dimensions = {},
- };
- outputs_info[index] = arg;
- outputSize += s;
- });
- // Compute offset for outputs 1 and so on
- {
- size_t offset = 0;
- for (auto& i : outputs_info) {
- i.location.offset = offset;
- offset += i.location.length;
- }
- }
- std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
- nn::allocateSharedMemory(outputSize)};
- if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
- return {};
- }
-
- // map pool
- sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
- if (inputMemory == nullptr) {
- return {};
- }
- char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
- if (inputPtr == nullptr) {
- return {};
- }
-
- // initialize pool
- inputMemory->update();
- for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
- char* begin = (char*)p;
- char* end = begin + s;
- // TODO: handle more than one input
- std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
- });
- inputMemory->commit();
-
- requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
- }
-
- return requests;
-}
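
The per-example request construction deleted above is superseded by the shared helper in 1.0/Utils, which packs a TestModel's buffers into input and output pools and returns a single Request. Assumed call-site shape — the helper name createRequest is inferred from the shared utilities, not shown in this hunk:

// Assumed call-site shape after this change; createRequest stands for the
// shared 1.0/Utils helper that replaces the deleted per-example loop.
const Request request = createRequest(testModel);
ASSERT_FALSE(request.pools.empty());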
-
-void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests) {
- // validate each request
- for (const Request& request : requests) {
- removeInputTest(preparedModel, request);
- removeOutputTest(preparedModel, request);
- }
+void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
+ const Request& request) {
+ removeInputTest(preparedModel, request);
+ removeOutputTest(preparedModel, request);
}
void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests) {
- for (const Request& request : requests) {
- SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
- Return<void> executeStatus = preparedModel->executeSynchronously(
- request, MeasureTiming::NO,
- [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- ASSERT_NE(ErrorStatus::NONE, error);
- EXPECT_EQ(outputShapes.size(), 0);
- EXPECT_TRUE(badTiming(timing));
- });
- ASSERT_TRUE(executeStatus.isOk());
- }
+ const Request& request) {
+ SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
+ Return<void> executeStatus = preparedModel->executeSynchronously(
+ request, MeasureTiming::NO,
+ [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+ ASSERT_NE(ErrorStatus::NONE, error);
+ EXPECT_EQ(outputShapes.size(), 0);
+ EXPECT_TRUE(badTiming(timing));
+ });
+ ASSERT_TRUE(executeStatus.isOk());
}
} // namespace functional
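
removeInputTest and removeOutputTest are defined earlier in this file; each exercises the driver with a deliberately malformed request. A hedged sketch of the input-removal pattern:

// Sketch of the removeInputTest idea: drop one input at a time and expect
// the driver to reject the request with INVALID_ARGUMENT.
for (size_t input = 0; input < request.inputs.size(); ++input) {
    std::vector<RequestArgument> args(request.inputs.begin(), request.inputs.end());
    args.erase(args.begin() + input);
    Request invalid = request;
    invalid.inputs = args;
    // execute `invalid` through each execution path and expect
    // ErrorStatus::INVALID_ARGUMENT.
}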
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index bd24edc..eb52110 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -126,7 +126,7 @@
::testing::VtsHalHidlTargetTestBase::TearDown();
}
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+void ValidationTest::validateEverything(const Model& model, const Request& request) {
validateModel(model);
// create IPreparedModel
@@ -136,11 +136,11 @@
return;
}
- validateRequests(preparedModel, requests);
- validateBurst(preparedModel, requests);
+ validateRequest(preparedModel, request);
+ validateBurst(preparedModel, request);
}
-void ValidationTest::validateFailure(const Model& model, const std::vector<Request>& requests) {
+void ValidationTest::validateFailure(const Model& model, const Request& request) {
// TODO: Should this always succeed?
// What if the invalid input is part of the model (i.e., a parameter).
validateModel(model);
@@ -151,7 +151,7 @@
return;
}
- validateRequestFailure(preparedModel, requests);
+ validateRequestFailure(preparedModel, request);
}
sp<IPreparedModel> getPreparedModel_1_2(
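
With the single-Request plumbing in place, wiring a validation case reduces to building the model and request once. An illustrative entry, assuming the createModel and createRequest helpers described in this change:

// Illustrative wiring of a validation case under the new single-Request API.
const Model model = createModel(testModel);
const Request request = createRequest(testModel);  // shared 1.0/Utils helper
validateEverything(model, request);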
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 90dfe25..e76ad7b 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -68,20 +68,16 @@
sp<IDevice> device;
};
-// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
protected:
- void validateEverything(const Model& model, const std::vector<Request>& requests);
- void validateFailure(const Model& model, const std::vector<Request>& requests);
+ void validateEverything(const Model& model, const Request& request);
+ void validateFailure(const Model& model, const Request& request);
private:
void validateModel(const Model& model);
- void validateRequests(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests);
- void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests);
- void validateBurst(const sp<IPreparedModel>& preparedModel,
- const std::vector<Request>& requests);
+ void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
+ void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
+ void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request);
};
// Tag for the generated tests