/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Event.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {
// Allocator helper: creates a hidl_memory region of |size| bytes backed by the
// given memory type ("ashmem" by default).
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");

namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
using ::generated_tests::filter;
using ::generated_tests::for_all;
using ::generated_tests::for_each;
using ::generated_tests::resize_accordingly;
using ::generated_tests::MixedTyped;
using ::generated_tests::MixedTypedExampleType;
using ::generated_tests::Float32Operands;
using ::generated_tests::Int32Operands;
using ::generated_tests::Quant8Operands;
using ::generated_tests::compare;

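// Copies output operands of element type |ty| from the mapped output pool |src|
// back into the corresponding vectors of |dst|, using the offsets and lengths
// recorded in the RequestArgument descriptors |ra|.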
template <typename ty>
void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
    MixedTyped& test = *dst;
    for_each(test, [&ra, src](int index, std::vector<ty>& m) {
        ASSERT_EQ(m.size(), ra[index].location.length / sizeof(ty));
        char* begin = src + ra[index].location.offset;
        memcpy(m.data(), begin, ra[index].location.length);
    });
}

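// Copies all supported operand types (float32, int32, quant8) from the output
// pool back into the MixedTyped result holder.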
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
    copy_back_<float>(dst, ra, src);
    copy_back_<int32_t>(dst, ra, src);
    copy_back_<uint8_t>(dst, ra, src);
}

// Top-level test driver for the models and examples generated by
// test_generator.py from the specifications under ml/nn/runtime/test/spec.
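//
// Rough sketch of how a generated test is expected to call this driver (the
// fixture and model names below are illustrative; the real symbols are emitted
// by test_generator.py):
//
//   TEST_F(NeuralnetworksHidlTest, some_generated_model) {
//       generated_tests::Execute(device, some_model::createTestModel,
//                                some_model::is_ignored, some_model::examples);
//   }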
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    Model model = create_model();
    sp<IPreparedModel> preparedModel;
    sp<Event> preparationEvent = new Event();
    ASSERT_NE(nullptr, preparationEvent.get());
    Return<void> prepareRet = device->prepareModel(
        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            preparedModel = prepared;
        });
    ASSERT_TRUE(prepareRet.isOk());
    ASSERT_NE(nullptr, preparedModel.get());
    Event::Status preparationStatus = preparationEvent->wait();
    EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);

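    // Each request uses two memory pools: pool 0 holds all inputs and pool 1
    // holds all outputs.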
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    int example_no = 1;
    for (auto& example : examples) {
        SCOPED_TRACE(example_no++);

        const MixedTyped& inputs = example.first;
        const MixedTyped& golden = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // First pass over the inputs: fill in the RequestArgument metadata
        // (pool index and length) and add up the total input size; the actual
        // data is copied into shared memory below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            inputs_info[index] = arg;
            inputSize += s;
        });
        // Second pass: compute the byte offset of each input so that the
        // inputs are packed contiguously in the input pool.
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }

        MixedTyped test;  // holding test results

        // Go through all outputs, initialize RequestArgument descriptors
        resize_accordingly(golden, test);
        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute the byte offset of each output so that the outputs are
        // packed contiguously in the output pool.
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
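        // Allocate one shared memory pool sized for all inputs and one sized
        // for all outputs.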
        std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                          allocateSharedMemory(outputSize)};
        ASSERT_NE(0ull, pools[INPUT].size());
        ASSERT_NE(0ull, pools[OUTPUT].size());

        // Map the input and output pools into this process so that test data
        // can be written in and results read back.
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
        ASSERT_NE(nullptr, inputMemory.get());
        ASSERT_NE(nullptr, outputMemory.get());
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
        ASSERT_NE(nullptr, inputPtr);
        ASSERT_NE(nullptr, outputPtr);
        inputMemory->update();
        outputMemory->update();

        // Go through all inputs, copy the values
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // Copy this input into its assigned offset within the input pool.
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });

        inputMemory->commit();
        outputMemory->commit();
        // execute request
        sp<Event> executionEvent = new Event();
        ASSERT_NE(nullptr, executionEvent.get());
        Return<ErrorStatus> executeStatus = preparedModel->execute(
            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionEvent);
        ASSERT_TRUE(executeStatus.isOk());
        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
        Event::Status eventStatus = executionEvent->wait();
        EXPECT_EQ(Event::Status::SUCCESS, eventStatus);

        // validate results
        outputMemory->read();
        copy_back(&test, outputs_info, outputPtr);
        outputMemory->commit();
        // Filter out don't cares
        MixedTyped filtered_golden;
        MixedTyped filtered_test;
        filter(golden, &filtered_golden, is_ignored);
        filter(test, &filtered_test, is_ignored);

        // Compare against the golden output; float results only need to be
        // close enough, not bit-exact.
        compare(filtered_golden, filtered_test);
    }
}

}  // namespace generated_tests

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android