Memory Domain HAL: Define HAL APIs.
- Add and document memory domain HAL APIs.
- Update the existing VTS code to work with V1_3::Request.
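
The V1_3 VTS tests still build a V1_0::Request and only widen it where the
V1_3 execution interface requires it. Roughly, the conversion pattern used
in the diff below looks like this sketch (it assumes the nn conversion
helpers pulled in via "Utils.h": convertToV1_3, compliantWithV1_0,
convertToV1_0; variable names other than request10 are illustrative):

    // Build the request in the V1_0 form, which the burst path and the
    // output-buffer helpers still consume directly.
    V1_0::Request request10 = createRequest(testModel);
    // Widen it for the V1_3 execution entry points.
    Request request = nn::convertToV1_3(request10);

    // Paths that still take a V1_0::Request (e.g. burst) narrow it back,
    // asserting first that no V1_3-only features are in use.
    ASSERT_TRUE(nn::compliantWithV1_0(request));
    V1_0::Request narrowed = nn::convertToV1_0(request);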
Bug: 141353602
Bug: 141363565
Test: mma
Test: NNT_static
Test: 1.3 VTS
Change-Id: Ia32555d4fef149fad4a79728981c5d9cca675a1a
Merged-In: Ia32555d4fef149fad4a79728981c5d9cca675a1a
(cherry picked from commit 931d5a18bc7d1bcf72c105bf86f928ba2bd5a939)
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index eced063..09ccc9a 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -60,7 +60,6 @@
using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
-using V1_0::Request;
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::MeasureTiming;
@@ -192,7 +191,7 @@
return byteSize > 1u;
}
-static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
+static void makeOutputInsufficientSize(uint32_t outputIndex, V1_0::Request* request) {
auto& length = request->outputs[outputIndex].location.length;
ASSERT_GT(length, 1u);
length -= 1u;
@@ -245,10 +244,11 @@
return;
}
- Request request = createRequest(testModel);
+ V1_0::Request request10 = createRequest(testModel);
if (testConfig.outputType == OutputType::INSUFFICIENT) {
- makeOutputInsufficientSize(/*outputIndex=*/0, &request);
+ makeOutputInsufficientSize(/*outputIndex=*/0, &request10);
}
+ Request request = nn::convertToV1_3(request10);
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
@@ -284,6 +284,8 @@
break;
}
case Executor::BURST: {
+ // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains
+ // V1_2.
SCOPED_TRACE("burst");
// create burst
@@ -292,15 +294,15 @@
ASSERT_NE(nullptr, controller.get());
// create memory keys
- std::vector<intptr_t> keys(request.pools.size());
+ std::vector<intptr_t> keys(request10.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
- keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+ keys[i] = reinterpret_cast<intptr_t>(&request10.pools[i]);
}
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
- controller->compute(request, testConfig.measureTiming, keys);
+ controller->compute(request10, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
@@ -361,7 +363,7 @@
}
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
// We want "close-enough" results.
checkResults(testModel, outputs);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 8092d04..96dc589 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -28,7 +28,6 @@
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using V1_0::ErrorStatus;
-using V1_0::Request;
using V1_2::MeasureTiming;
using V1_2::OutputShape;
using V1_2::Timing;
@@ -93,9 +92,13 @@
}
// burst
+ // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
{
SCOPED_TRACE(message + " [burst]");
+ ASSERT_TRUE(nn::compliantWithV1_0(request));
+ V1_0::Request request10 = nn::convertToV1_0(request);
+
// create burst
std::shared_ptr<::android::nn::ExecutionBurstController> burst =
android::nn::ExecutionBurstController::create(preparedModel,
@@ -103,13 +106,13 @@
ASSERT_NE(nullptr, burst.get());
// create memory keys
- std::vector<intptr_t> keys(request.pools.size());
+ std::vector<intptr_t> keys(request10.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
- keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+ keys[i] = reinterpret_cast<intptr_t>(&request10.pools[i]);
}
// execute and verify
- const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
+ const auto [n, outputShapes, timing, fallback] = burst->compute(request10, measure, keys);
const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
EXPECT_EQ(outputShapes.size(), 0);
@@ -117,7 +120,7 @@
EXPECT_FALSE(fallback);
// additional burst testing
- if (request.pools.size() > 0) {
+ if (request10.pools.size() > 0) {
// valid free
burst->freeMemory(keys.front());
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 92d8fa7..1140b68 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -25,6 +25,7 @@
#include "1.3/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
+#include "Utils.h"
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
@@ -32,7 +33,6 @@
hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
-using V1_0::Request;
using V1_1::ExecutionPreference;
// internal helper function
@@ -124,9 +124,9 @@
// Forward declaration from ValidateModel.cpp
void validateModel(const sp<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
-void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateRequest.cpp
-void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
// Forward declaration from ValidateBurst.cpp
void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
@@ -139,7 +139,11 @@
if (preparedModel == nullptr) return;
validateRequest(preparedModel, request);
- validateBurst(preparedModel, request);
+
+ // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
+ ASSERT_TRUE(nn::compliantWithV1_0(request));
+ V1_0::Request request10 = nn::convertToV1_0(request);
+ validateBurst(preparedModel, request10);
}
void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
@@ -157,7 +161,7 @@
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ const Request request = nn::convertToV1_3(createRequest(kTestModel));
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {