/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

#include <gtest/gtest.h>
#include <iostream>

#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_1::vts::functional {

using namespace test_helper;
using hidl::memory::V1_0::IMemory;
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::IPreparedModel;
using V1_0::Operand;
using V1_0::OperandLifeTime;
using V1_0::OperandType;
using V1_0::Request;
using V1_0::implementation::ExecutionCallback;
using V1_0::implementation::PreparedModelCallback;

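// Builds a HIDL Model from the given TestModel. Data for CONSTANT_COPY operands
// is packed into Model::operandValues, while data for CONSTANT_REFERENCE
// operands is packed into a single shared memory pool appended to Model::pools.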
Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];

        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }

        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc};
    }

    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });

    // Constant copies.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }

    // Shared memory.
    hidl_vec<hidl_memory> pools;
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);

        // Load the CONSTANT_REFERENCE operand data into the newly allocated pool.
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);

        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
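//
// EvaluatePreparedModel runs a single asynchronous execution on an already
// prepared model: it builds a Request from the test inputs, launches execute(),
// waits on the ExecutionCallback, and checks the outputs against the reference
// results with checkResults().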
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
    const Request request = createRequest(testModel);

    // Launch execution.
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executionLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

    // Retrieve execution status.
    executionCallback->wait();
    ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = getOutputBuffers(request);

    // We want "close-enough" results.
    checkResults(testModel, outputs);
}

// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {
  protected:
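    // Runs the full generated-test flow for one TestModel: converts it to a
    // HIDL model, queries operation support with getSupportedOperations_1_1,
    // prepares the model with prepareModel_1_1, and then executes and verifies
    // it. The test is skipped if the service does not fully support the model
    // and therefore cannot prepare it.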
    void Execute(const TestModel& testModel) {
        Model model = createModel(testModel);

        // Check whether the service can handle every operation in the model.
        bool fullySupportsModel = false;
        Return<void> supportedCall = device->getSupportedOperations_1_1(
                model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                    ASSERT_EQ(ErrorStatus::NONE, status);
                    ASSERT_NE(0ul, supported.size());
                    fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                     [](bool valid) { return valid; });
                });
        ASSERT_TRUE(supportedCall.isOk());

        // Launch model preparation.
        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
        Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
                model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
        ASSERT_TRUE(prepareLaunchStatus.isOk());
        ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

        // Retrieve the prepared model.
        preparedModelCallback->wait();
        ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
        sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();

        // Early termination if the vendor service cannot fully prepare the model.
        if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
            ASSERT_EQ(nullptr, preparedModel.get());
            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                         "prepare model that it does not support.";
            std::cout << "[          ] Early termination of test because vendor service cannot "
                         "prepare model that it does not support."
                      << std::endl;
            GTEST_SKIP();
        }
        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
        ASSERT_NE(nullptr, preparedModel.get());

        EvaluatePreparedModel(preparedModel, testModel);
    }
};

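// Runs the prepare-and-execute flow above once for each generated TestModel.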
TEST_P(GeneratedTest, Test) {
    Execute(*mTestModel);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });
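// Note: the predicate above selects every generated model that is not expected
// to fail. A narrower filter could be substituted when debugging a specific
// driver feature; for example (hypothetical, not part of the suite):
//
//   INSTANTIATE_GENERATED_TEST(GeneratedTest,
//                              [](const TestModel& m) { return m.isRelaxed; });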

}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional