/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_3::vts::functional {

using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
using V1_2::OperationTypeRange;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
        hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

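// Sends the (mutated, invalid) model to getSupportedOperations_1_3 and expects the driver to
// report INVALID_ARGUMENT.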
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
                                           const Model& model) {
    SCOPED_TRACE(message + " [getSupportedOperations_1_3]");

    Return<void> ret = device->getSupportedOperations_1_3(
            model, [&](ErrorStatus status, const hidl_vec<bool>&) {
                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            });
    EXPECT_TRUE(ret.isOk());
}

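// Launches prepareModel_1_3 with the (mutated, invalid) model and expects INVALID_ARGUMENT both
// from the launch status and from the callback, with no prepared model returned.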
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
                                 const Model& model, ExecutionPreference preference) {
    SCOPED_TRACE(message + " [prepareModel_1_3]");

    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    Return<ErrorStatus> prepareLaunchStatus =
            device->prepareModel_1_3(model, preference, hidl_vec<hidl_handle>(),
                                     hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = getPreparedModel_1_3(preparedModelCallback);
    ASSERT_EQ(nullptr, preparedModel.get());
}

static bool validExecutionPreference(ExecutionPreference preference) {
    return preference == ExecutionPreference::LOW_POWER ||
           preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
           preference == ExecutionPreference::SUSTAINED_SPEED;
}

// Primary validation function. This function will take a valid model, apply a
// mutation to it to invalidate the model, then pass it to interface calls that
// use the model. Note that the model here is passed by value, and any mutation
// to the model does not leave this function.
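//
// A hypothetical usage sketch (mirroring mutateOperandTypeTest below; the message string and
// the mutation shown here are illustrative only):
//
//     validate(device, "example: operand 0 set to an invalid type", model,
//              [](Model* model) {
//                  model->operands[0].type = static_cast<OperandType>(
//                          static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1);
//              });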
static void validate(const sp<IDevice>& device, const std::string& message, Model model,
                     const std::function<void(Model*)>& mutation,
                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
    mutation(&model);
    if (validExecutionPreference(preference)) {
        validateGetSupportedOperations(device, message, model);
    }
    validatePrepareModel(device, message, model, preference);
}

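// Appends a scalar INT32 model-input operand to the model and returns its index.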
static uint32_t addOperand(Model* model) {
    return hidl_vec_push_back(&model->operands,
                              {
                                      .type = OperandType::INT32,
                                      .dimensions = {},
                                      .numberOfConsumers = 0,
                                      .scale = 0.0f,
                                      .zeroPoint = 0,
                                      .lifetime = OperandLifeTime::MODEL_INPUT,
                                      .location = {.poolIndex = 0, .offset = 0, .length = 0},
                              });
}

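// Same as above, but overrides the new operand's lifetime and marks it as having one consumer.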
static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
    uint32_t index = addOperand(model);
    model->operands[index].numberOfConsumers = 1;
    model->operands[index].lifetime = lifetime;
    return index;
}

///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////

static const uint32_t invalidOperandTypes[] = {
        static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN) - 1,
        static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1,
        static_cast<uint32_t>(OperandTypeRange::OEM_MIN) - 1,
        static_cast<uint32_t>(OperandTypeRange::OEM_MAX) + 1,
};

static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        for (uint32_t invalidOperandType : invalidOperandTypes) {
            const std::string message = "mutateOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to value " +
                                        std::to_string(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
            });
        }
    }
}

///////////////////////// VALIDATE OPERAND RANK /////////////////////////

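// Returns a rank that is invalid for the given operand type (1 for scalar types), or 0 if no
// invalid rank is tested for that type.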
static uint32_t getInvalidRank(OperandType type) {
    switch (type) {
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
            return 1;
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            return 0;
        default:
            return 0;
    }
}

static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
        if (invalidRank == 0) {
            continue;
        }
        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
                                    " has rank of " + std::to_string(invalidRank);
        validate(device, message, model, [operand, invalidRank](Model* model) {
            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
        });
    }
}

///////////////////////// VALIDATE OPERAND SCALE /////////////////////////

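// Returns a scale that is invalid for the given operand type: non-zero for types that must have a
// scale of 0, negative for TENSOR_INT32, and zero for the quantized types.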
static float getInvalidScale(OperandType type) {
    switch (type) {
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            return 1.0f;
        case OperandType::TENSOR_INT32:
            return -1.0f;
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            return 0.0f;
        default:
            return 0.0f;
    }
}

static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const float invalidScale = getInvalidScale(model.operands[operand].type);
        const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
                                    " has scale of " + std::to_string(invalidScale);
        validate(device, message, model, [operand, invalidScale](Model* model) {
            model->operands[operand].scale = invalidScale;
        });
    }
}

///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////

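// Returns the zero points that are invalid for the given operand type: non-zero for types that
// require a zero point of 0, and out-of-range values for the quantized types.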
static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
    switch (type) {
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            return {1};
        case OperandType::TENSOR_QUANT8_ASYMM:
            return {-1, 256};
        case OperandType::TENSOR_QUANT8_SYMM:
            return {-129, -1, 1, 128};
        case OperandType::TENSOR_QUANT16_ASYMM:
            return {-1, 65536};
        case OperandType::TENSOR_QUANT16_SYMM:
            return {-32769, -1, 1, 32768};
        default:
            return {};
    }
}

static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::vector<int32_t> invalidZeroPoints =
                getInvalidZeroPoints(model.operands[operand].type);
        for (int32_t invalidZeroPoint : invalidZeroPoints) {
            const std::string message = "mutateOperandZeroPointTest: operand " +
                                        std::to_string(operand) + " has zero point of " +
                                        std::to_string(invalidZeroPoint);
            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
                model->operands[operand].zeroPoint = invalidZeroPoint;
            });
        }
    }
}

///////////////////////// VALIDATE EXTRA ??? /////////////////////////

// TODO: Operand::lifetime
// TODO: Operand::location

///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////

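// Rewrites the operand in place so that it takes the given type, fixing up the dimensions, scale,
// zero point, and channel quantization parameters so that the operand itself remains well formed
// and only the type mismatch with the consuming operation is invalid.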
static void mutateOperand(Operand* operand, OperandType type) {
    Operand newOperand = *operand;
    newOperand.type = type;
    switch (type) {
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
            newOperand.dimensions = hidl_vec<uint32_t>();
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
            newOperand.dimensions =
                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_INT32:
            newOperand.dimensions =
                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            newOperand.dimensions =
                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
            break;
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
            newOperand.dimensions =
                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;

            SymmPerChannelQuantParams channelQuant;
            channelQuant.channelDim = 0;
            channelQuant.scales = hidl_vec<float>(
                    operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
                                                   : 0);
            for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
                channelQuant.scales[i] = 1.0f;
            }
            newOperand.extraParams.channelQuant(std::move(channelQuant));
        } break;
        case OperandType::OEM:
        case OperandType::TENSOR_OEM_BYTE:
        default:
            break;
    }
    *operand = newOperand;
}

static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
    // Do not test OEM types
    if (type == model.operands[operand].type || type == OperandType::OEM ||
        type == OperandType::TENSOR_OEM_BYTE) {
        return true;
    }
    for (const Operation& operation : model.operations) {
        // Skip mutateOperationOperandTypeTest for the following operations.
        // - LSH_PROJECTION's second argument is allowed to have any type.
        // - ARGMIN and ARGMAX's first argument can be any of
        // TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM|QUANT8_ASYMM_SIGNED).
        // - CAST's argument can be any of
        // TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM|QUANT8_ASYMM_SIGNED).
        // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
        // - DEQUANTIZE input can be any of
        // TENSOR_(QUANT8_ASYMM|QUANT8_ASYMM_SIGNED|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL),
        // output can be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
        // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32; output can be
        // TENSOR_QUANT8_ASYMM or TENSOR_QUANT8_ASYMM_SIGNED.
        // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
        // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
        // - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
        // - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
        switch (operation.type) {
            case OperationType::LSH_PROJECTION: {
                if (operand == operation.inputs[1]) {
                    return true;
                }
            } break;
            case OperationType::CAST:
            case OperationType::ARGMAX:
            case OperationType::ARGMIN: {
                if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
                    type == OperandType::TENSOR_INT32 ||
                    type == OperandType::TENSOR_QUANT8_ASYMM ||
                    type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    return true;
                }
            } break;
            case OperationType::QUANTIZE: {
                if (operand == operation.inputs[0] &&
                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
                    return true;
                }
                if (operand == operation.outputs[0] &&
                    (type == OperandType::TENSOR_QUANT8_ASYMM ||
                     type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) {
                    return true;
                }
            } break;
            case OperationType::RANDOM_MULTINOMIAL: {
                if (operand == operation.inputs[0] &&
                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
                    return true;
                }
            } break;
            case OperationType::DEQUANTIZE: {
                if (operand == operation.inputs[0] &&
                    (type == OperandType::TENSOR_QUANT8_ASYMM ||
                     type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                     type == OperandType::TENSOR_QUANT8_SYMM ||
                     type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
                    return true;
                }
                if (operand == operation.outputs[0] &&
                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
                    return true;
                }
            } break;
            case OperationType::TRANSPOSE_CONV_2D:
            case OperationType::GROUPED_CONV_2D:
            case OperationType::DEPTHWISE_CONV_2D:
            case OperationType::CONV_2D: {
                if (operand == operation.inputs[1] &&
                    (type == OperandType::TENSOR_QUANT8_ASYMM ||
                     type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
                    return true;
                }
            } break;
            default:
                break;
        }
    }
    return false;
}

static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
            if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
                continue;
            }
            const std::string message = "mutateOperationOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to type " +
                                        toString(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                mutateOperand(&model->operands[operand], invalidOperandType);
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////

static const uint32_t invalidOperationTypes[] = {
        static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MAX) + 1,
        static_cast<uint32_t>(OperationTypeRange::OEM_MIN) - 1,
        static_cast<uint32_t>(OperationTypeRange::OEM_MAX) + 1,
};

static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (uint32_t invalidOperationType : invalidOperationTypes) {
            const std::string message = "mutateOperationTypeTest: operation " +
                                        std::to_string(operation) + " set to value " +
                                        std::to_string(invalidOperationType);
            validate(device, message, model, [operation, invalidOperationType](Model* model) {
                model->operations[operation].type =
                        static_cast<OperationType>(invalidOperationType);
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////

static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
                                        std::to_string(operation) + " input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
                model->operations[operation].inputs[input] = invalidOperand;
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////

static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
                                        std::to_string(operation) + " output " +
                                        std::to_string(output);
            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
                model->operations[operation].outputs[output] = invalidOperand;
            });
        }
    }
}

///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////

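// Removes every element equal to "value" from the vector and decrements the remaining elements
// that are greater than "value", so that operand indexes stay consistent after an operand is
// removed. For example, removing value 2 from {0, 2, 3, 5} yields {0, 2, 4}.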
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
    if (vec) {
        // remove elements matching "value"
        auto last = std::remove(vec->begin(), vec->end(), value);
        vec->resize(std::distance(vec->begin(), last));

        // decrement elements exceeding "value"
        std::transform(vec->begin(), vec->end(), vec->begin(),
                       [value](uint32_t v) { return v > value ? v - 1 : v; });
    }
}

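// Removes the operand at "index" and rewrites every operand reference in the model to account for
// the shifted indexes.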
static void removeOperand(Model* model, uint32_t index) {
    hidl_vec_removeAt(&model->operands, index);
    for (Operation& operation : model->operations) {
        removeValueAndDecrementGreaterValues(&operation.inputs, index);
        removeValueAndDecrementGreaterValues(&operation.outputs, index);
    }
    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}

static bool removeOperandSkip(size_t operand, const Model& model) {
    for (const Operation& operation : model.operations) {
        // Skip removeOperandTest for the following operations.
        // - SPLIT's outputs are not checked during prepareModel.
        if (operation.type == OperationType::SPLIT) {
            for (const size_t outOperand : operation.outputs) {
                if (operand == outOperand) {
                    return true;
                }
            }
        }
        // BIDIRECTIONAL_SEQUENCE_LSTM and BIDIRECTIONAL_SEQUENCE_RNN can have either one or two
        // outputs depending on their mergeOutputs parameter.
        if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_LSTM ||
            operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) {
            for (const size_t outOperand : operation.outputs) {
                if (operand == outOperand) {
                    return true;
                }
            }
        }
    }
    return false;
}

static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        if (removeOperandSkip(operand, model)) {
            continue;
        }
        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
        validate(device, message, model,
                 [operand](Model* model) { removeOperand(model, operand); });
    }
}

///////////////////////// REMOVE OPERATION /////////////////////////

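// Removes the operation at "index" after dropping it as a consumer of each of its input operands.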
static void removeOperation(Model* model, uint32_t index) {
    for (uint32_t operand : model->operations[index].inputs) {
        model->operands[operand].numberOfConsumers--;
    }
    hidl_vec_removeAt(&model->operations, index);
}

static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
        validate(device, message, model,
                 [operation](Model* model) { removeOperation(model, operation); });
    }
}

///////////////////////// REMOVE OPERATION INPUT /////////////////////////

static bool removeOperationInputSkip(const Operation& op, size_t input) {
    // Skip removeOperationInputTest for the following operations.
    // - CONCATENATION has at least 2 inputs, with the last element being INT32.
    // - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
    // SPACE_TO_DEPTH, DEPTH_TO_SPACE, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
    // layout parameter.
    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
    // parameter.
    switch (op.type) {
        case OperationType::CONCATENATION: {
            if (op.inputs.size() > 2 && input != op.inputs.size() - 1) {
                return true;
            }
        } break;
        case OperationType::DEPTHWISE_CONV_2D: {
            if ((op.inputs.size() == 12 && input == 11) || (op.inputs.size() == 9 && input == 8)) {
                return true;
            }
        } break;
        case OperationType::CONV_2D:
        case OperationType::AVERAGE_POOL_2D:
        case OperationType::MAX_POOL_2D:
        case OperationType::L2_POOL_2D: {
            if ((op.inputs.size() == 11 && input == 10) || (op.inputs.size() == 8 && input == 7)) {
                return true;
            }
        } break;
        case OperationType::RESIZE_BILINEAR: {
            if (op.inputs.size() == 4 && input == 3) {
                return true;
            }
        } break;
        case OperationType::SPACE_TO_DEPTH:
        case OperationType::DEPTH_TO_SPACE:
        case OperationType::BATCH_TO_SPACE_ND: {
            if (op.inputs.size() == 3 && input == 2) {
                return true;
            }
        } break;
        case OperationType::SPACE_TO_BATCH_ND: {
            if (op.inputs.size() == 4 && input == 3) {
                return true;
            }
        } break;
        case OperationType::L2_NORMALIZATION: {
            if (op.inputs.size() == 2 && input == 1) {
                return true;
            }
        } break;
        case OperationType::LOCAL_RESPONSE_NORMALIZATION: {
            if (op.inputs.size() == 6 && input == 5) {
                return true;
            }
        } break;
        case OperationType::SOFTMAX: {
            if (op.inputs.size() == 3 && input == 2) {
                return true;
            }
        } break;
        default:
            break;
    }
    return false;
}

static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const Operation& op = model.operations[operation];
            if (removeOperationInputSkip(op, input)) {
                continue;
            }
            const std::string message = "removeOperationInputTest: operation " +
                                        std::to_string(operation) + ", input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input](Model* model) {
                uint32_t operand = model->operations[operation].inputs[input];
                model->operands[operand].numberOfConsumers--;
                hidl_vec_removeAt(&model->operations[operation].inputs, input);
            });
        }
    }
}

///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////

static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "removeOperationOutputTest: operation " +
                                        std::to_string(operation) + ", output " +
                                        std::to_string(output);
            validate(device, message, model, [operation, output](Model* model) {
                hidl_vec_removeAt(&model->operations[operation].outputs, output);
            });
        }
    }
}

///////////////////////// MODEL VALIDATION /////////////////////////

// TODO: remove model input
// TODO: remove model output
// TODO: add unused operation

///////////////////////// ADD OPERATION INPUT /////////////////////////

static bool addOperationInputSkip(const Operation& op) {
    // Skip addOperationInputTest for the following operations.
    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional INT32 axis
    // parameter.
    if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
        (op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
        (op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
        return true;
    }
    return false;
}

static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        if (addOperationInputSkip(model.operations[operation])) {
            continue;
        }
        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
            hidl_vec_push_back(&model->operations[operation].inputs, index);
            hidl_vec_push_back(&model->inputIndexes, index);
        });
    }
}

///////////////////////// ADD OPERATION OUTPUT /////////////////////////

static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message =
                "addOperationOutputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
            hidl_vec_push_back(&model->operations[operation].outputs, index);
            hidl_vec_push_back(&model->outputIndexes, index);
        });
    }
}

///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////

static const int32_t invalidExecutionPreferences[] = {
        static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
        static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
};

static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
    for (int32_t preference : invalidExecutionPreferences) {
        const std::string message =
                "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
        validate(
                device, message, model, [](Model*) {},
                static_cast<ExecutionPreference>(preference));
    }
}

////////////////////////// ENTRY POINT //////////////////////////////

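// Runs every model-mutation test above against the given device and model.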
void validateModel(const sp<IDevice>& device, const Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
    mutateExecutionPreferenceTest(device, model);
}

}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional