Use proper alignment and padding for pointer arguments -- HAL.

This CL modifies the shared memory allocation for pointer arguments to
use proper alignment and padding. We use the default alignment (64) with
the minimum padding (1) for HIDL drivers, and the default alignment (64)
with the default padding (64) for AIDL drivers.
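
For reference, a minimal sketch of how an append-style pool builder
could honor these alignment and padding arguments (this is not the real
MutableMemoryBuilder; the PoolBuilder and Region names below are made up
for illustration): each region starts at the next multiple of the
alignment, and the space reserved for it is rounded up to a multiple of
the padding.

    // Illustrative only; assumes 32-bit offsets/lengths as in DataLocation.
    #include <cstdint>
    #include <cstdio>

    struct Region {
        uint32_t offset;
        uint32_t length;  // unpadded length of the caller's data
    };

    class PoolBuilder {
      public:
        Region append(uint32_t length, uint32_t alignment, uint32_t padding) {
            // Start the new region at the next multiple of `alignment`.
            const uint32_t offset = roundUp(mSize, alignment);
            // Reserve space rounded up to the `padding` granularity.
            mSize = offset + roundUp(length, padding);
            return {offset, length};
        }
        uint32_t size() const { return mSize; }

      private:
        static uint32_t roundUp(uint32_t value, uint32_t multiple) {
            return ((value + multiple - 1) / multiple) * multiple;
        }
        uint32_t mSize = 0;
    };

    int main() {
        PoolBuilder builder;
        // HIDL-style: default alignment 64, minimum padding 1.
        const Region a = builder.append(/*length=*/10, /*alignment=*/64, /*padding=*/1);
        // AIDL-style: default alignment 64, default padding 64.
        const Region b = builder.append(/*length=*/10, /*alignment=*/64, /*padding=*/64);
        std::printf("a: offset=%u length=%u\n", a.offset, a.length);
        std::printf("b: offset=%u length=%u, pool size=%u\n", b.offset, b.length, builder.size());
        return 0;
    }

With these example values, the second region starts at offset 64 and the
pool grows to 128 bytes, so a driver may safely read a full padded chunk
past the end of each argument.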

Bug: 184164929
Test: NNT_static
Change-Id: I22591640fa047d5f75d437edac1a7645d3b05526
Merged-In: I22591640fa047d5f75d437edac1a7645d3b05526
(cherry picked from commit 8bfa243972d34c97111f0e7af6e77a0ab04d4f60)
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 7987ab4..00970c0 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -65,7 +65,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
 
@@ -111,7 +112,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto hidlRequest = NN_TRY(convert(requestInShared));
     return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation));
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index 8e82d25..b4b6f68 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -328,7 +328,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
@@ -373,7 +374,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index 1d87937..d0ef36e 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -97,7 +97,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -140,7 +141,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto hidlRequest = NN_TRY(convert(requestInShared));
     auto hidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index cb56bdc..1623de5 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -143,7 +143,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -184,7 +185,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     const auto hidlRequest = NN_TRY(convert(requestInShared));
     const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
@@ -236,7 +238,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto hidlRequest = NN_TRY(convert(requestInShared));
     auto hidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index 3cbba4d..87cd0e4 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -178,7 +178,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -248,7 +249,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto aidlRequest = NN_TRY(convert(requestInShared));
     const auto aidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 1915607..18e7636 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -80,7 +80,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -127,7 +128,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     const auto aidlRequest = NN_TRY(convert(requestInShared));
     const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -197,7 +199,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto aidlRequest = NN_TRY(convert(requestInShared));
     auto aidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index fdc90df..702ee92 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -122,8 +122,8 @@
 // Unlike `flushDataFromPointerToShared`, this method will not copy the input pointer data to the
 // shared memory pool. Use `relocationOut` to flush the input or output data after the call.
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
-        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
-        RequestRelocation* relocationOut);
+        const nn::Request* request, uint32_t alignment, uint32_t padding,
+        std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut);
 
 nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
         size_t numberOfOperands, const std::vector<nn::Operation>& operations);
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index eaeb9ad..8e55bf0 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -220,8 +220,8 @@
 }
 
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
-        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
-        RequestRelocation* relocationOut) {
+        const nn::Request* request, uint32_t alignment, uint32_t padding,
+        std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut) {
     CHECK(request != nullptr);
     CHECK(maybeRequestInSharedOut != nullptr);
     CHECK(relocationOut != nullptr);
@@ -249,7 +249,7 @@
         const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
                                       location.pointer);
         CHECK(data != nullptr);
-        input.location = inputBuilder.append(location.length);
+        input.location = inputBuilder.append(location.length, alignment, padding);
         inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
     }
 
@@ -273,7 +273,7 @@
         output.lifetime = nn::Request::Argument::LifeTime::POOL;
         void* data = std::get<void*>(location.pointer);
         CHECK(data != nullptr);
-        output.location = outputBuilder.append(location.length);
+        output.location = outputBuilder.append(location.length, alignment, padding);
         outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
     }