Move NN_TRY macro out of struct initialization

NNAPI NN_TRY macros use Statement Expressions (a GNU extension) to
propagate errors. However, a "return" statement inside a Statement
Expression can lead to memory leaks when the Statement Expression is
used to initialize a member of a struct. Specifically, when one or more
members of a struct have already been initialized and a Statement
Expression used to initialize a subsequent member early-returns, the
previously initialized members will not have their destructors called.

This CL moves NN_TRY macro invocations out of struct initialization to
avoid these potential memory leaks.
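
For reference, a minimal sketch of the failure mode and of the fix
pattern applied below. TRY and cloneVec here are simplified stand-ins
for illustration only, not the real NNAPI NN_TRY macro or utilities:

    #include <optional>
    #include <utility>
    #include <vector>

    // Hypothetical fallible clone helper, used only to illustrate the hazard.
    std::optional<std::vector<int>> cloneVec(const std::vector<int>& v) {
        return v;  // pretend this can fail in real code
    }

    // Simplified stand-in for NN_TRY: a GNU Statement Expression that
    // early-returns from the enclosing function on failure.
    #define TRY(expr)                       \
        ({                                  \
            auto _result = (expr);          \
            if (!_result.has_value()) {     \
                return std::nullopt;        \
            }                               \
            std::move(_result).value();     \
        })

    struct Handle {
        std::vector<int> fds;   // constructed before .ints is evaluated
        std::vector<int> ints;
    };

    std::optional<Handle> cloneHandleLeaky(const Handle& handle) {
        // If cloneVec(handle.ints) fails, the "return" inside TRY leaves the
        // partially built Handle without running the destructor of the
        // already-initialized .fds member, leaking its heap allocation.
        return Handle{
                .fds = TRY(cloneVec(handle.fds)),
                .ints = TRY(cloneVec(handle.ints)),
        };
    }

    std::optional<Handle> cloneHandleFixed(const Handle& handle) {
        // Hoisting TRY out of the braced initializer makes the early return
        // happen before the aggregate starts being constructed.
        auto fds = TRY(cloneVec(handle.fds));
        auto ints = TRY(cloneVec(handle.ints));
        return Handle{
                .fds = std::move(fds),
                .ints = std::move(ints),
        };
    }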

Bug: 230500484
Test: mma
Test: presubmit
Change-Id: I3493fd4764f8eacc86750e6414e62bc891abaccd
Merged-In: I3493fd4764f8eacc86750e6414e62bc891abaccd
diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp
index 76a0b07..f9b4f6e 100644
--- a/neuralnetworks/aidl/utils/src/Utils.cpp
+++ b/neuralnetworks/aidl/utils/src/Utils.cpp
@@ -51,8 +51,9 @@
 }
 
 nn::GeneralResult<common::NativeHandle> clone(const common::NativeHandle& handle) {
+    auto fds = NN_TRY(cloneVec(handle.fds));
     return common::NativeHandle{
-            .fds = NN_TRY(cloneVec(handle.fds)),
+            .fds = std::move(fds),
             .ints = handle.ints,
     };
 }
@@ -63,29 +64,32 @@
     switch (memory.getTag()) {
         case Memory::Tag::ashmem: {
             const auto& ashmem = memory.get<Memory::Tag::ashmem>();
+            auto fd = NN_TRY(clone(ashmem.fd));
             auto handle = common::Ashmem{
-                    .fd = NN_TRY(clone(ashmem.fd)),
+                    .fd = std::move(fd),
                     .size = ashmem.size,
             };
             return Memory::make<Memory::Tag::ashmem>(std::move(handle));
         }
         case Memory::Tag::mappableFile: {
             const auto& memFd = memory.get<Memory::Tag::mappableFile>();
+            auto fd = NN_TRY(clone(memFd.fd));
             auto handle = common::MappableFile{
                     .length = memFd.length,
                     .prot = memFd.prot,
-                    .fd = NN_TRY(clone(memFd.fd)),
+                    .fd = std::move(fd),
                     .offset = memFd.offset,
             };
             return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
         }
         case Memory::Tag::hardwareBuffer: {
             const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
-            auto handle = graphics::common::HardwareBuffer{
+            auto handle = NN_TRY(clone(hardwareBuffer.handle));
+            auto ahwbHandle = graphics::common::HardwareBuffer{
                     .description = hardwareBuffer.description,
-                    .handle = NN_TRY(clone(hardwareBuffer.handle)),
+                    .handle = std::move(handle),
             };
-            return Memory::make<Memory::Tag::hardwareBuffer>(std::move(handle));
+            return Memory::make<Memory::Tag::hardwareBuffer>(std::move(ahwbHandle));
         }
     }
     return (NN_ERROR() << "Unrecognized Memory::Tag: " << underlyingType(memory.getTag()))
@@ -109,19 +113,21 @@
 }
 
 nn::GeneralResult<Request> clone(const Request& request) {
+    auto pools = NN_TRY(clone(request.pools));
     return Request{
             .inputs = request.inputs,
             .outputs = request.outputs,
-            .pools = NN_TRY(clone(request.pools)),
+            .pools = std::move(pools),
     };
 }
 
 nn::GeneralResult<Model> clone(const Model& model) {
+    auto pools = NN_TRY(clone(model.pools));
     return Model{
             .main = model.main,
             .referenced = model.referenced,
             .operandValues = model.operandValues,
-            .pools = NN_TRY(clone(model.pools)),
+            .pools = std::move(pools),
             .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
             .extensionNameToPrefix = model.extensionNameToPrefix,
     };