Move NN_TRY macro out of struct initialization
The NNAPI NN_TRY macros use Statement Expressions (a GNU extension) to
propagate errors. However, a "return" statement inside a Statement
Expression can leak memory when the Statement Expression is used to
initialize a member of a struct: if one member of the struct has
already been initialized and a Statement Expression initializing a
subsequent member returns early, the destructors of the previously
initialized members are never called.
This CL moves every NN_TRY invocation out of struct initialization to
avoid these potential memory leaks.
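As an illustration, here is a minimal sketch of the problematic pattern
and of the fix applied in this CL. The TRY macro below is a simplified
stand-in modeled on NN_TRY, not its actual definition:

    #include <optional>
    #include <string>
    #include <utility>

    // Simplified TRY-style macro: a statement expression that
    // early-returns from the enclosing function on failure.
    #define TRY(expr)                                       \
        ({                                                  \
            auto _result = (expr);                          \
            if (!_result.has_value()) return std::nullopt;  \
            std::move(_result).value();                     \
        })

    struct Pair {
        std::string first;   // non-trivial destructor
        std::string second;  // non-trivial destructor
    };

    std::optional<Pair> makePairLeaky(std::optional<std::string> a,
                                      std::optional<std::string> b) {
        // If TRY(std::move(b)) returns early, the string already
        // constructed for .first does not have its destructor run,
        // leaking its allocation.
        return Pair{
                .first = TRY(std::move(a)),
                .second = TRY(std::move(b)),
        };
    }

    std::optional<Pair> makePairSafe(std::optional<std::string> a,
                                     std::optional<std::string> b) {
        // The pattern used throughout this CL: hoist the TRYs into
        // named locals so an early return destroys them normally.
        auto first = TRY(std::move(a));
        auto second = TRY(std::move(b));
        return Pair{
                .first = std::move(first),
                .second = std::move(second),
        };
    }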
Bug: 230500484
Test: mma
Test: presubmit
Change-Id: I3493fd4764f8eacc86750e6414e62bc891abaccd
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 09e9d80..4eeb414 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -133,28 +133,35 @@
auto table =
NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
.operandPerformance = std::move(table),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
GeneralResult<Operation> unvalidatedConvert(const hal::V1_3::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -166,25 +173,34 @@
}
GeneralResult<Operand> unvalidatedConvert(const hal::V1_3::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
GeneralResult<Model> unvalidatedConvert(const hal::V1_3::Model& model) {
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -204,8 +220,9 @@
}
}
+ auto operands = NN_TRY(unvalidatedConvert(subgraph.operands));
return Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
+ .operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = subgraph.inputIndexes,
.outputIndexes = subgraph.outputIndexes,
@@ -225,10 +242,13 @@
}
GeneralResult<Request> unvalidatedConvert(const hal::V1_3::Request& request) {
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
@@ -463,37 +483,45 @@
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
- std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
- operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
+ std::vector<nn::Capabilities::OperandPerformance> filteredOperandPerformances;
+ filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size());
std::copy_if(capabilities.operandPerformance.asVector().begin(),
capabilities.operandPerformance.asVector().end(),
- std::back_inserter(operandPerformance),
+ std::back_inserter(filteredOperandPerformances),
[](const nn::Capabilities::OperandPerformance& operandPerformance) {
return compliantVersion(operandPerformance.type).has_value();
});
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
- .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
+ .operandPerformance = std::move(operandPerformance),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
nn::GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const nn::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -509,15 +537,19 @@
}
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.numberOfConsumers = 0,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -527,13 +559,18 @@
<< "Model cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -548,9 +585,10 @@
operands[i].numberOfConsumers = numberOfConsumers[i];
}
+ auto operations = NN_TRY(unvalidatedConvert(subgraph.operations));
return Subgraph{
.operands = std::move(operands),
- .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
+ .operations = std::move(operations),
.inputIndexes = subgraph.inputIndexes,
.outputIndexes = subgraph.outputIndexes,
};
@@ -574,10 +612,13 @@
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}