Merge "Support 24k and 32k config for LE Audio Offload"
diff --git a/compatibility_matrices/Android.bp b/compatibility_matrices/Android.bp
index 193fd2b..524242f 100644
--- a/compatibility_matrices/Android.bp
+++ b/compatibility_matrices/Android.bp
@@ -74,6 +74,18 @@
}
vintf_compatibility_matrix {
+ name: "framework_compatibility_matrix.7.xml",
+ stem: "compatibility_matrix.7.xml",
+ srcs: [
+ "compatibility_matrix.7.xml",
+ ],
+ kernel_configs: [
+ "kernel_config_t_5.10",
+ "kernel_config_t_5.15",
+ ],
+}
+
+vintf_compatibility_matrix {
name: "framework_compatibility_matrix.current.xml",
stem: "compatibility_matrix.current.xml",
srcs: [
diff --git a/compatibility_matrices/Android.mk b/compatibility_matrices/Android.mk
index 9e715bf..d19f0da 100644
--- a/compatibility_matrices/Android.mk
+++ b/compatibility_matrices/Android.mk
@@ -102,6 +102,7 @@
framework_compatibility_matrix.4.xml \
framework_compatibility_matrix.5.xml \
framework_compatibility_matrix.6.xml \
+ framework_compatibility_matrix.7.xml \
framework_compatibility_matrix.current.xml \
framework_compatibility_matrix.device.xml \
diff --git a/compatibility_matrices/compatibility_matrix.7.xml b/compatibility_matrices/compatibility_matrix.7.xml
new file mode 100644
index 0000000..6967671
--- /dev/null
+++ b/compatibility_matrices/compatibility_matrix.7.xml
@@ -0,0 +1,720 @@
+<compatibility-matrix version="1.0" type="framework" level="7">
+ <hal format="hidl" optional="true">
+ <name>android.hardware.atrace</name>
+ <version>1.0</version>
+ <interface>
+ <name>IAtraceDevice</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.audio</name>
+ <version>6.0</version>
+ <version>7.0-1</version>
+ <interface>
+ <name>IDevicesFactory</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.audio.effect</name>
+ <version>6.0</version>
+ <version>7.0</version>
+ <interface>
+ <name>IEffectsFactory</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.authsecret</name>
+ <version>1</version>
+ <interface>
+ <name>IAuthSecret</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.authsecret</name>
+ <version>1.0</version>
+ <interface>
+ <name>IAuthSecret</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.automotive.audiocontrol</name>
+ <interface>
+ <name>IAudioControl</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.automotive.can</name>
+ <version>1.0</version>
+ <interface>
+ <name>ICanBus</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ <interface>
+ <name>ICanController</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.automotive.evs</name>
+ <version>1.0-1</version>
+ <interface>
+ <name>IEvsEnumerator</name>
+ <instance>default</instance>
+ <regex-instance>[a-z]+/[0-9]+</regex-instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.automotive.occupant_awareness</name>
+ <version>1</version>
+ <interface>
+ <name>IOccupantAwareness</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.automotive.sv</name>
+ <version>1.0</version>
+ <interface>
+ <name>ISurroundViewService</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.automotive.vehicle</name>
+ <version>2.0</version>
+ <interface>
+ <name>IVehicle</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.biometrics.face</name>
+ <version>1.0</version>
+ <interface>
+ <name>IBiometricsFace</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.biometrics.face</name>
+ <interface>
+ <name>IFace</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.biometrics.fingerprint</name>
+ <version>2.1-3</version>
+ <interface>
+ <name>IBiometricsFingerprint</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.biometrics.fingerprint</name>
+ <interface>
+ <name>IFingerprint</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.bluetooth</name>
+ <version>1.0-1</version>
+ <interface>
+ <name>IBluetoothHci</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.bluetooth.audio</name>
+ <interface>
+ <name>IBluetoothAudioProviderFactory</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.boot</name>
+ <version>1.2</version>
+ <interface>
+ <name>IBootControl</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.broadcastradio</name>
+ <version>1.0-1</version>
+ <interface>
+ <name>IBroadcastRadioFactory</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.broadcastradio</name>
+ <version>2.0</version>
+ <interface>
+ <name>IBroadcastRadio</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.camera.provider</name>
+ <version>2.4-7</version>
+ <interface>
+ <name>ICameraProvider</name>
+ <regex-instance>[^/]+/[0-9]+</regex-instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.cas</name>
+ <version>1.1-2</version>
+ <interface>
+ <name>IMediaCasService</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.confirmationui</name>
+ <version>1.0</version>
+ <interface>
+ <name>IConfirmationUI</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.contexthub</name>
+ <version>1.2</version>
+ <interface>
+ <name>IContexthub</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.drm</name>
+ <version>1.3-4</version>
+ <interface>
+ <name>ICryptoFactory</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ <interface>
+ <name>IDrmFactory</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.dumpstate</name>
+ <interface>
+ <name>IDumpstateDevice</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.gatekeeper</name>
+ <version>1.0</version>
+ <interface>
+ <name>IGatekeeper</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.gnss</name>
+ <version>2.0-1</version>
+ <interface>
+ <name>IGnss</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.gnss</name>
+ <interface>
+ <name>IGnss</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.graphics.allocator</name>
+ <!-- New, non-Go devices should use 4.0, tested in vts_treble_vintf_vendor_test -->
+ <version>2.0</version>
+ <version>3.0</version>
+ <version>4.0</version>
+ <interface>
+ <name>IAllocator</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.graphics.composer</name>
+ <version>2.1-4</version>
+ <interface>
+ <name>IComposer</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.graphics.mapper</name>
+ <!-- New, non-Go devices should use 4.0, tested in vts_treble_vintf_vendor_test -->
+ <version>2.1</version>
+ <version>3.0</version>
+ <version>4.0</version>
+ <interface>
+ <name>IMapper</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="false">
+ <name>android.hardware.health</name>
+ <version>1</version>
+ <interface>
+ <name>IHealth</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.health.storage</name>
+ <version>1</version>
+ <interface>
+ <name>IStorage</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.identity</name>
+ <version>1-4</version>
+ <interface>
+ <name>IIdentityCredentialStore</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.oemlock</name>
+ <version>1</version>
+ <interface>
+ <name>IOemLock</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.ir</name>
+ <version>1</version>
+ <interface>
+ <name>IConsumerIr</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.input.classifier</name>
+ <version>1.0</version>
+ <interface>
+ <name>IInputClassifier</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.keymaster</name>
+ <version>3.0</version>
+ <version>4.0-1</version>
+ <interface>
+ <name>IKeymasterDevice</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.keymaster</name>
+ <version>4.0-1</version>
+ <interface>
+ <name>IKeymasterDevice</name>
+ <instance>strongbox</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.security.dice</name>
+ <version>1</version>
+ <interface>
+ <name>IDiceDevice</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.security.keymint</name>
+ <version>1-2</version>
+ <interface>
+ <name>IKeyMintDevice</name>
+ <instance>default</instance>
+ <instance>strongbox</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.security.keymint</name>
+ <version>1-2</version>
+ <interface>
+ <name>IRemotelyProvisionedComponent</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.light</name>
+ <version>1</version>
+ <interface>
+ <name>ILights</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.media.c2</name>
+ <version>1.0-2</version>
+ <interface>
+ <name>IComponentStore</name>
+ <regex-instance>default[0-9]*</regex-instance>
+ <regex-instance>vendor[0-9]*_software</regex-instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.media.omx</name>
+ <version>1.0</version>
+ <interface>
+ <name>IOmx</name>
+ <instance>default</instance>
+ </interface>
+ <interface>
+ <name>IOmxStore</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.memtrack</name>
+ <version>1</version>
+ <interface>
+ <name>IMemtrack</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.neuralnetworks</name>
+ <version>1.0-3</version>
+ <interface>
+ <name>IDevice</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.neuralnetworks</name>
+ <version>1-4</version>
+ <interface>
+ <name>IDevice</name>
+ <regex-instance>.*</regex-instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.nfc</name>
+ <version>1.2</version>
+ <interface>
+ <name>INfc</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.nfc</name>
+ <interface>
+ <name>INfc</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.oemlock</name>
+ <version>1.0</version>
+ <interface>
+ <name>IOemLock</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="false">
+ <name>android.hardware.power</name>
+ <version>1-2</version>
+ <interface>
+ <name>IPower</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.power.stats</name>
+ <interface>
+ <name>IPowerStats</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.config</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioConfig</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.data</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioData</name>
+ <instance>slot1</instance>
+ <instance>slot2</instance>
+ <instance>slot3</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.messaging</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioMessaging</name>
+ <instance>slot1</instance>
+ <instance>slot2</instance>
+ <instance>slot3</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.modem</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioModem</name>
+ <instance>slot1</instance>
+ <instance>slot2</instance>
+ <instance>slot3</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.network</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioNetwork</name>
+ <instance>slot1</instance>
+ <instance>slot2</instance>
+ <instance>slot3</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.sim</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioSim</name>
+ <instance>slot1</instance>
+ <instance>slot2</instance>
+ <instance>slot3</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.radio.voice</name>
+ <version>1</version>
+ <interface>
+ <name>IRadioVoice</name>
+ <instance>slot1</instance>
+ <instance>slot2</instance>
+ <instance>slot3</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.radio</name>
+ <version>1.2</version>
+ <interface>
+ <name>ISap</name>
+ <instance>slot1</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.renderscript</name>
+ <version>1.0</version>
+ <interface>
+ <name>IDevice</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.rebootescrow</name>
+ <version>1</version>
+ <interface>
+ <name>IRebootEscrow</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.secure_element</name>
+ <version>1.0-2</version>
+ <interface>
+ <name>ISecureElement</name>
+ <regex-instance>eSE[1-9][0-9]*</regex-instance>
+ <regex-instance>SIM[1-9][0-9]*</regex-instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.security.secureclock</name>
+ <version>1</version>
+ <interface>
+ <name>ISecureClock</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.security.sharedsecret</name>
+ <version>1</version>
+ <interface>
+ <name>ISharedSecret</name>
+ <instance>default</instance>
+ <instance>strongbox</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.sensors</name>
+ <version>1.0</version>
+ <version>2.0-1</version>
+ <interface>
+ <name>ISensors</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.soundtrigger</name>
+ <version>2.3</version>
+ <interface>
+ <name>ISoundTriggerHw</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.tetheroffload.config</name>
+ <version>1.0</version>
+ <interface>
+ <name>IOffloadConfig</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.tetheroffload.control</name>
+ <version>1.1</version>
+ <interface>
+ <name>IOffloadControl</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="false">
+ <name>android.hardware.thermal</name>
+ <version>2.0</version>
+ <interface>
+ <name>IThermal</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.tv.cec</name>
+ <version>1.0-1</version>
+ <interface>
+ <name>IHdmiCec</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.tv.input</name>
+ <version>1.0</version>
+ <interface>
+ <name>ITvInput</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.tv.tuner</name>
+ <version>1.0-1</version>
+ <interface>
+ <name>ITuner</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.usb</name>
+ <version>1.0-3</version>
+ <interface>
+ <name>IUsb</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.usb.gadget</name>
+ <version>1.0-2</version>
+ <interface>
+ <name>IUsbGadget</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.vibrator</name>
+ <version>1-2</version>
+ <interface>
+ <name>IVibrator</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.vibrator</name>
+ <version>1-2</version>
+ <interface>
+ <name>IVibratorManager</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.weaver</name>
+ <version>1.0</version>
+ <interface>
+ <name>IWeaver</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.weaver</name>
+ <version>1</version>
+ <interface>
+ <name>IWeaver</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.wifi</name>
+ <version>1.3-5</version>
+ <interface>
+ <name>IWifi</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.wifi.hostapd</name>
+ <version>1.0-3</version>
+ <interface>
+ <name>IHostapd</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.wifi.hostapd</name>
+ <version>1</version>
+ <interface>
+ <name>IHostapd</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="hidl" optional="true">
+ <name>android.hardware.wifi.supplicant</name>
+ <version>1.2-4</version>
+ <interface>
+ <name>ISupplicant</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.wifi.supplicant</name>
+ <interface>
+ <name>ISupplicant</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+</compatibility-matrix>
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 5810fd5..2979b11 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -1,4 +1,4 @@
-<compatibility-matrix version="1.0" type="framework" level="7">
+<compatibility-matrix version="1.0" type="framework" level="8">
<hal format="hidl" optional="true">
<name>android.hardware.atrace</name>
<version>1.0</version>
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index daa10fd..d98fef0 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -110,8 +110,9 @@
return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
<< memory.handle()->numInts << " numInts, but expected 0";
}
+ auto fd = NN_TRY(nn::dupFd(memory.handle()->data[0]));
auto handle = nn::Memory::Ashmem{
- .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])),
+ .fd = std::move(fd),
.size = static_cast<size_t>(memory.size()),
};
return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
@@ -137,12 +138,13 @@
}
if (memory.name() != "hardware_buffer_blob") {
- auto handle = nn::Memory::Unknown{
- .handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())),
+ auto handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle()));
+ auto unknown = nn::Memory::Unknown{
+ .handle = std::move(handle),
.size = static_cast<size_t>(memory.size()),
.name = memory.name(),
};
- return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
+ return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(unknown)});
}
#ifdef __ANDROID__
@@ -245,19 +247,23 @@
}
GeneralResult<Operand> unvalidatedConvert(const hal::V1_0::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
+ .lifetime = lifetime,
+ .location = location,
};
}
GeneralResult<Operation> unvalidatedConvert(const hal::V1_0::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -298,26 +304,30 @@
}
}
+ auto operands = NN_TRY(unvalidatedConvert(model.operands));
auto main = Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(model.operands)),
+ .operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = model.inputIndexes,
.outputIndexes = model.outputIndexes,
};
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
return Model{
.main = std::move(main),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
};
}
GeneralResult<Request::Argument> unvalidatedConvert(const hal::V1_0::RequestArgument& argument) {
const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
: Request::Argument::LifeTime::POOL;
+ const auto location = NN_TRY(unvalidatedConvert(argument.location));
return Request::Argument{
.lifetime = lifetime,
- .location = NN_TRY(unvalidatedConvert(argument.location)),
+ .location = location,
.dimensions = argument.dimensions,
};
}
@@ -328,9 +338,11 @@
pools.reserve(memories.size());
std::move(memories.begin(), memories.end(), std::back_inserter(pools));
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
.pools = std::move(pools),
};
}
@@ -500,11 +512,13 @@
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
+ const auto float32Performance = NN_TRY(unvalidatedConvert(
+ capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32)));
+ const auto quantized8Performance = NN_TRY(unvalidatedConvert(
+ capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM)));
return Capabilities{
- .float32Performance = NN_TRY(unvalidatedConvert(
- capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),
- .quantized8Performance = NN_TRY(unvalidatedConvert(
- capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))),
+ .float32Performance = float32Performance,
+ .quantized8Performance = quantized8Performance,
};
}
@@ -517,20 +531,24 @@
}
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.numberOfConsumers = 0,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
+ .lifetime = lifetime,
+ .location = location,
};
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -572,13 +590,16 @@
operands[i].numberOfConsumers = numberOfConsumers[i];
}
+ auto operations = NN_TRY(unvalidatedConvert(model.main.operations));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
return Model{
.operands = std::move(operands),
- .operations = NN_TRY(unvalidatedConvert(model.main.operations)),
+ .operations = std::move(operations),
.inputIndexes = model.main.inputIndexes,
.outputIndexes = model.main.outputIndexes,
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
};
}
@@ -589,9 +610,10 @@
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE;
+ const auto location = NN_TRY(unvalidatedConvert(requestArgument.location));
return RequestArgument{
.hasNoValue = hasNoValue,
- .location = NN_TRY(unvalidatedConvert(requestArgument.location)),
+ .location = location,
.dimensions = requestArgument.dimensions,
};
}
@@ -606,10 +628,13 @@
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index 5bdbe31..887c8ec 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -88,8 +88,9 @@
}
GeneralResult<Operation> unvalidatedConvert(const hal::V1_1::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -110,17 +111,20 @@
}
}
+ auto operands = NN_TRY(unvalidatedConvert(model.operands));
auto main = Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(model.operands)),
+ .operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = model.inputIndexes,
.outputIndexes = model.outputIndexes,
};
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
return Model{
.main = std::move(main),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
};
}
@@ -195,19 +199,23 @@
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
+ const auto float32Performance = NN_TRY(unvalidatedConvert(
+ capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32)));
+    const auto quantized8Performance = NN_TRY(unvalidatedConvert(
+ capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM)));
+ const auto relaxedFloat32toFloat16Performance =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
return Capabilities{
- .float32Performance = NN_TRY(unvalidatedConvert(
- capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),
- .quantized8Performance = NN_TRY(unvalidatedConvert(
- capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))),
- .relaxedFloat32toFloat16Performance = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+ .float32Performance = float32Performance,
+            .quantized8Performance = quantized8Performance,
+ .relaxedFloat32toFloat16Performance = relaxedFloat32toFloat16Performance,
};
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -229,13 +237,16 @@
operands[i].numberOfConsumers = numberOfConsumers[i];
}
+ auto operations = NN_TRY(unvalidatedConvert(model.main.operations));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
return Model{
.operands = std::move(operands),
- .operations = NN_TRY(unvalidatedConvert(model.main.operations)),
+ .operations = std::move(operations),
.inputIndexes = model.main.inputIndexes,
.outputIndexes = model.main.outputIndexes,
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
};
}
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 62ec2ed..78d71cf 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -131,15 +131,18 @@
GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
GeneralResult<Operation> unvalidatedConvert(const hal::V1_2::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -154,14 +157,18 @@
}
GeneralResult<Operand> unvalidatedConvert(const hal::V1_2::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -196,19 +203,23 @@
}
}
+ auto operands = NN_TRY(unvalidatedConvert(model.operands));
auto main = Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(model.operands)),
+ .operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = model.inputIndexes,
.outputIndexes = model.outputIndexes,
};
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
.main = std::move(main),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -248,9 +259,10 @@
}
GeneralResult<Extension> unvalidatedConvert(const hal::V1_2::Extension& extension) {
+ auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes));
return Extension{
.name = extension.name,
- .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
+ .operandTypes = std::move(operandTypes),
};
}
@@ -406,35 +418,41 @@
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
- std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
- operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
+ std::vector<nn::Capabilities::OperandPerformance> filteredOperandPerformances;
+ filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size());
std::copy_if(capabilities.operandPerformance.asVector().begin(),
capabilities.operandPerformance.asVector().end(),
- std::back_inserter(operandPerformance),
+ std::back_inserter(filteredOperandPerformances),
[](const nn::Capabilities::OperandPerformance& operandPerformance) {
return compliantVersion(operandPerformance.type).has_value();
});
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
- .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
+ .operandPerformance = std::move(operandPerformance),
};
}
nn::GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const nn::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -449,15 +467,19 @@
}
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.numberOfConsumers = 0,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -482,15 +504,19 @@
operands[i].numberOfConsumers = numberOfConsumers[i];
}
+ auto operations = NN_TRY(unvalidatedConvert(model.main.operations));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
.operands = std::move(operands),
- .operations = NN_TRY(unvalidatedConvert(model.main.operations)),
+ .operations = std::move(operations),
.inputIndexes = model.main.inputIndexes,
.outputIndexes = model.main.outputIndexes,
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -524,9 +550,10 @@
}
nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
+ auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes));
return Extension{
.name = extension.name,
- .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
+ .operandTypes = std::move(operandTypes),
};
}
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 09e9d80..4eeb414 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -133,28 +133,35 @@
auto table =
NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
.operandPerformance = std::move(table),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
GeneralResult<Operation> unvalidatedConvert(const hal::V1_3::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -166,25 +173,34 @@
}
GeneralResult<Operand> unvalidatedConvert(const hal::V1_3::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
GeneralResult<Model> unvalidatedConvert(const hal::V1_3::Model& model) {
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -204,8 +220,9 @@
}
}
+ auto operands = NN_TRY(unvalidatedConvert(subgraph.operands));
return Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
+ .operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = subgraph.inputIndexes,
.outputIndexes = subgraph.outputIndexes,
@@ -225,10 +242,13 @@
}
GeneralResult<Request> unvalidatedConvert(const hal::V1_3::Request& request) {
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
@@ -463,37 +483,45 @@
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
- std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
- operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
+ std::vector<nn::Capabilities::OperandPerformance> filteredOperandPerformances;
+ filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size());
std::copy_if(capabilities.operandPerformance.asVector().begin(),
capabilities.operandPerformance.asVector().end(),
- std::back_inserter(operandPerformance),
+ std::back_inserter(filteredOperandPerformances),
[](const nn::Capabilities::OperandPerformance& operandPerformance) {
return compliantVersion(operandPerformance.type).has_value();
});
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
- .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
+ .operandPerformance = std::move(operandPerformance),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
nn::GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const nn::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -509,15 +537,19 @@
}
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.numberOfConsumers = 0,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -527,13 +559,18 @@
<< "Model cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -548,9 +585,10 @@
operands[i].numberOfConsumers = numberOfConsumers[i];
}
+ auto operations = NN_TRY(unvalidatedConvert(subgraph.operations));
return Subgraph{
.operands = std::move(operands),
- .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
+ .operations = std::move(operations),
.inputIndexes = subgraph.inputIndexes,
.outputIndexes = subgraph.outputIndexes,
};
@@ -574,10 +612,13 @@
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 081e3d7..47c72b4 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -177,22 +177,28 @@
auto table =
NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
.operandPerformance = std::move(table),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const aidl_hal::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
@@ -228,10 +234,13 @@
}
GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
+ auto inputs = NN_TRY(toUnsigned(operation.inputs));
+ auto outputs = NN_TRY(toUnsigned(operation.outputs));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
- .inputs = NN_TRY(toUnsigned(operation.inputs)),
- .outputs = NN_TRY(toUnsigned(operation.outputs)),
+ .type = type,
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
};
}
@@ -241,14 +250,19 @@
}
GeneralResult<Operand> unvalidatedConvert(const aidl_hal::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ auto dimensions = NN_TRY(toUnsigned(operand.dimensions));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
- .dimensions = NN_TRY(toUnsigned(operand.dimensions)),
+ .type = type,
+ .dimensions = std::move(dimensions),
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -280,22 +294,31 @@
}
GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model) {
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph) {
+ auto operands = NN_TRY(unvalidatedConvert(subgraph.operands));
+ auto operations = NN_TRY(unvalidatedConvert(subgraph.operations));
+ auto inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes));
+ auto outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes));
return Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
- .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
- .inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes)),
- .outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes)),
+ .operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = std::move(inputIndexes),
+ .outputIndexes = std::move(outputIndexes),
};
}
@@ -308,9 +331,10 @@
}
GeneralResult<Extension> unvalidatedConvert(const aidl_hal::Extension& extension) {
+ auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes));
return Extension{
.name = extension.name,
- .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
+ .operandTypes = std::move(operandTypes),
};
}
@@ -326,8 +350,9 @@
}
GeneralResult<OutputShape> unvalidatedConvert(const aidl_hal::OutputShape& outputShape) {
+ auto dimensions = NN_TRY(toUnsigned(outputShape.dimensions));
return OutputShape{
- .dimensions = NN_TRY(toUnsigned(outputShape.dimensions)),
+ .dimensions = std::move(dimensions),
.isSufficient = outputShape.isSufficient,
};
}
@@ -346,8 +371,9 @@
return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
}
+ auto fd = NN_TRY(dupFd(ashmem.fd.get()));
auto handle = Memory::Ashmem{
- .fd = NN_TRY(dupFd(ashmem.fd.get())),
+ .fd = std::move(fd),
.size = static_cast<size_t>(ashmem.size),
};
return std::make_shared<const Memory>(Memory{.handle = std::move(handle)});
@@ -426,7 +452,8 @@
}
GeneralResult<BufferDesc> unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc) {
- return BufferDesc{.dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions))};
+ auto dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions));
+ return BufferDesc{.dimensions = std::move(dimensions)};
}
GeneralResult<BufferRole> unvalidatedConvert(const aidl_hal::BufferRole& bufferRole) {
@@ -440,20 +467,25 @@
}
GeneralResult<Request> unvalidatedConvert(const aidl_hal::Request& request) {
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
GeneralResult<Request::Argument> unvalidatedConvert(const aidl_hal::RequestArgument& argument) {
const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
: Request::Argument::LifeTime::POOL;
+ const auto location = NN_TRY(unvalidatedConvert(argument.location));
+ auto dimensions = NN_TRY(toUnsigned(argument.dimensions));
return Request::Argument{
.lifetime = lifetime,
- .location = NN_TRY(unvalidatedConvert(argument.location)),
- .dimensions = NN_TRY(toUnsigned(argument.dimensions)),
+ .location = location,
+ .dimensions = std::move(dimensions),
};
}
@@ -720,8 +752,9 @@
nn::GeneralResult<OperandPerformance> unvalidatedConvert(
const nn::Capabilities::OperandPerformance& operandPerformance) {
- return OperandPerformance{.type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info))};
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
+ return OperandPerformance{.type = type, .info = info};
}
nn::GeneralResult<std::vector<OperandPerformance>> unvalidatedConvert(
@@ -788,7 +821,8 @@
}
nn::GeneralResult<BufferDesc> unvalidatedConvert(const nn::BufferDesc& bufferDesc) {
- return BufferDesc{.dimensions = NN_TRY(toSigned(bufferDesc.dimensions))};
+ auto dimensions = NN_TRY(toSigned(bufferDesc.dimensions));
+ return BufferDesc{.dimensions = std::move(dimensions)};
}
nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRole) {
@@ -847,7 +881,8 @@
}
nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape) {
- return OutputShape{.dimensions = NN_TRY(toSigned(outputShape.dimensions)),
+ auto dimensions = NN_TRY(toSigned(outputShape.dimensions));
+ return OutputShape{.dimensions = std::move(dimensions),
.isSufficient = outputShape.isSufficient};
}
@@ -915,14 +950,19 @@
}
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ auto dimensions = NN_TRY(toSigned(operand.dimensions));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
- .dimensions = NN_TRY(toSigned(operand.dimensions)),
+ .type = type,
+ .dimensions = std::move(dimensions),
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -934,19 +974,26 @@
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
+ auto inputs = NN_TRY(toSigned(operation.inputs));
+ auto outputs = NN_TRY(toSigned(operation.outputs));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
- .inputs = NN_TRY(toSigned(operation.inputs)),
- .outputs = NN_TRY(toSigned(operation.outputs)),
+ .type = type,
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
};
}
nn::GeneralResult<Subgraph> unvalidatedConvert(const nn::Model::Subgraph& subgraph) {
+ auto operands = NN_TRY(unvalidatedConvert(subgraph.operands));
+ auto operations = NN_TRY(unvalidatedConvert(subgraph.operations));
+ auto inputIndexes = NN_TRY(toSigned(subgraph.inputIndexes));
+ auto outputIndexes = NN_TRY(toSigned(subgraph.outputIndexes));
return Subgraph{
- .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
- .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
- .inputIndexes = NN_TRY(toSigned(subgraph.inputIndexes)),
- .outputIndexes = NN_TRY(toSigned(subgraph.outputIndexes)),
+ .operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = std::move(inputIndexes),
+ .outputIndexes = std::move(outputIndexes),
};
}
@@ -969,13 +1016,18 @@
<< "Model cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -989,10 +1041,13 @@
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
@@ -1003,10 +1058,12 @@
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE;
+ const auto location = NN_TRY(unvalidatedConvert(requestArgument.location));
+ auto dimensions = NN_TRY(toSigned(requestArgument.dimensions));
return RequestArgument{
.hasNoValue = hasNoValue,
- .location = NN_TRY(unvalidatedConvert(requestArgument.location)),
- .dimensions = NN_TRY(toSigned(requestArgument.dimensions)),
+ .location = location,
+ .dimensions = std::move(dimensions),
};
}
@@ -1033,9 +1090,11 @@
}
nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
+ const auto timeOnDeviceNs = NN_TRY(unvalidatedConvert(timing.timeOnDevice));
+ const auto timeInDriverNs = NN_TRY(unvalidatedConvert(timing.timeInDriver));
return Timing{
- .timeOnDeviceNs = NN_TRY(unvalidatedConvert(timing.timeOnDevice)),
- .timeInDriverNs = NN_TRY(unvalidatedConvert(timing.timeInDriver)),
+ .timeOnDeviceNs = timeOnDeviceNs,
+ .timeInDriverNs = timeInDriverNs,
};
}
@@ -1064,20 +1123,25 @@
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .operandPerformance = std::move(operandPerformance),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
- return Extension{.name = extension.name,
- .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))};
+ auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes));
+ return Extension{.name = extension.name, .operandTypes = std::move(operandTypes)};
}
#ifdef NN_AIDL_V4_OR_ABOVE
nn::GeneralResult<TokenValuePair> unvalidatedConvert(const nn::TokenValuePair& tokenValuePair) {
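Editor's note: every hunk in Conversions.cpp above applies the same mechanical refactoring: each NN_TRY(...) call is hoisted out of the braced initializer list into a named local, and the local is then moved (or copied, for small trivially copyable values) into the aggregate. The patch itself does not state the motivation; the observable effect is that each fallible conversion, together with its early return, becomes its own full statement instead of running inside the initializer list. A minimal sketch of the pattern, using assumed stand-in names (Subgraph, convertVec, std::optional) rather than the real NNAPI types, nn::GeneralResult, and NN_TRY:

// Illustrative sketch only; not part of the patch.
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

struct Subgraph {
    std::vector<int32_t> operands;
    std::vector<int32_t> inputIndexes;
};

// A fallible element-wise conversion standing in for toUnsigned()/toSigned().
std::optional<std::vector<int32_t>> convertVec(const std::vector<int64_t>& in) {
    std::vector<int32_t> out;
    out.reserve(in.size());
    for (int64_t v : in) {
        if (v < 0) return std::nullopt;  // conversion failure
        out.push_back(static_cast<int32_t>(v));
    }
    return out;
}

std::optional<Subgraph> convert(const std::vector<int64_t>& ops,
                                const std::vector<int64_t>& ins) {
    // Before: the fallible calls (and their early returns) sat inside the
    // braced initializer list. After: each call is its own statement, and the
    // already-converted locals are moved into the aggregate.
    auto operands = convertVec(ops);
    if (!operands) return std::nullopt;
    auto inputIndexes = convertVec(ins);
    if (!inputIndexes) return std::nullopt;
    return Subgraph{
            .operands = std::move(*operands),
            .inputIndexes = std::move(*inputIndexes),
    };
}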
diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp
index 76a0b07..f9b4f6e 100644
--- a/neuralnetworks/aidl/utils/src/Utils.cpp
+++ b/neuralnetworks/aidl/utils/src/Utils.cpp
@@ -51,8 +51,9 @@
}
nn::GeneralResult<common::NativeHandle> clone(const common::NativeHandle& handle) {
+ auto fds = NN_TRY(cloneVec(handle.fds));
return common::NativeHandle{
- .fds = NN_TRY(cloneVec(handle.fds)),
+ .fds = std::move(fds),
.ints = handle.ints,
};
}
@@ -63,29 +64,32 @@
switch (memory.getTag()) {
case Memory::Tag::ashmem: {
const auto& ashmem = memory.get<Memory::Tag::ashmem>();
+ auto fd = NN_TRY(clone(ashmem.fd));
auto handle = common::Ashmem{
- .fd = NN_TRY(clone(ashmem.fd)),
+ .fd = std::move(fd),
.size = ashmem.size,
};
return Memory::make<Memory::Tag::ashmem>(std::move(handle));
}
case Memory::Tag::mappableFile: {
const auto& memFd = memory.get<Memory::Tag::mappableFile>();
+ auto fd = NN_TRY(clone(memFd.fd));
auto handle = common::MappableFile{
.length = memFd.length,
.prot = memFd.prot,
- .fd = NN_TRY(clone(memFd.fd)),
+ .fd = std::move(fd),
.offset = memFd.offset,
};
return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
}
case Memory::Tag::hardwareBuffer: {
const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
- auto handle = graphics::common::HardwareBuffer{
+ auto handle = NN_TRY(clone(hardwareBuffer.handle));
+ auto ahwbHandle = graphics::common::HardwareBuffer{
.description = hardwareBuffer.description,
- .handle = NN_TRY(clone(hardwareBuffer.handle)),
+ .handle = std::move(handle),
};
- return Memory::make<Memory::Tag::hardwareBuffer>(std::move(handle));
+ return Memory::make<Memory::Tag::hardwareBuffer>(std::move(ahwbHandle));
}
}
return (NN_ERROR() << "Unrecognized Memory::Tag: " << underlyingType(memory.getTag()))
@@ -109,19 +113,21 @@
}
nn::GeneralResult<Request> clone(const Request& request) {
+ auto pools = NN_TRY(clone(request.pools));
return Request{
.inputs = request.inputs,
.outputs = request.outputs,
- .pools = NN_TRY(clone(request.pools)),
+ .pools = std::move(pools),
};
}
nn::GeneralResult<Model> clone(const Model& model) {
+ auto pools = NN_TRY(clone(model.pools));
return Model{
.main = model.main,
.referenced = model.referenced,
.operandValues = model.operandValues,
- .pools = NN_TRY(clone(model.pools)),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
.extensionNameToPrefix = model.extensionNameToPrefix,
};
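Editor's note: the Utils.cpp hunks above extend the same hoist-then-move pattern to clone(), where the cloned union alternatives carry file descriptors that must be duplicated rather than shared. A short sketch of that clone-by-tag shape, assuming stand-in types (std::variant instead of the AIDL Memory union, raw ints plus POSIX dup() instead of ScopedFileDescriptor):

// Illustrative sketch only; not part of the patch.
#include <optional>
#include <unistd.h>
#include <variant>

struct Ashmem { int fd; long size; };
struct MappableFile { long length; int prot; int fd; long offset; };
using Memory = std::variant<Ashmem, MappableFile>;

std::optional<Memory> clone(const Memory& memory) {
    if (const auto* ashmem = std::get_if<Ashmem>(&memory)) {
        int fd = dup(ashmem->fd);  // duplicate the descriptor into a local first
        if (fd < 0) return std::nullopt;
        return Memory{Ashmem{.fd = fd, .size = ashmem->size}};
    }
    if (const auto* file = std::get_if<MappableFile>(&memory)) {
        int fd = dup(file->fd);
        if (fd < 0) return std::nullopt;
        return Memory{MappableFile{.length = file->length,
                                   .prot = file->prot,
                                   .fd = fd,
                                   .offset = file->offset}};
    }
    return std::nullopt;  // unrecognized alternative
}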
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 40f6cd1..dcf8451 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -659,6 +659,7 @@
ASSERT_NE(nullptr, burst.get());
// associate a unique slot with each memory pool
+ constexpr int64_t kIgnoreSlot = -1;
int64_t currentSlot = 0;
std::vector<int64_t> slots;
slots.reserve(request.pools.size());
@@ -667,7 +668,7 @@
slots.push_back(currentSlot++);
} else {
EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
- slots.push_back(-1);
+ slots.push_back(kIgnoreSlot);
}
}
@@ -698,8 +699,10 @@
// Mark each slot as unused after the execution. This is unnecessary because the
// burst is freed after this scope ends, but this is here to test the functionality.
for (int64_t slot : slots) {
- ret = burst->releaseMemoryResource(slot);
- ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ if (slot != kIgnoreSlot) {
+ ret = burst->releaseMemoryResource(slot);
+ ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ }
}
break;
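Editor's note: the GeneratedTestHarness.cpp hunk above replaces the bare -1 with a named kIgnoreSlot sentinel and then skips sentinel entries when releasing, so releaseMemoryResource is only called for pools that actually received a slot. A tiny sketch of that bookkeeping, with assumed stand-in types (PoolKind instead of RequestMemoryPool, a callback instead of IBurst::releaseMemoryResource):

// Illustrative sketch only; not part of the patch.
#include <cstdint>
#include <functional>
#include <vector>

enum class PoolKind { kPool, kToken };
constexpr int64_t kIgnoreSlot = -1;  // token pools never get a real slot

std::vector<int64_t> assignSlots(const std::vector<PoolKind>& pools) {
    std::vector<int64_t> slots;
    slots.reserve(pools.size());
    int64_t currentSlot = 0;
    for (PoolKind kind : pools) {
        slots.push_back(kind == PoolKind::kPool ? currentSlot++ : kIgnoreSlot);
    }
    return slots;
}

void releaseSlots(const std::vector<int64_t>& slots,
                  const std::function<void(int64_t)>& release) {
    for (int64_t slot : slots) {
        if (slot == kIgnoreSlot) continue;  // nothing was acquired for this entry
        release(slot);                      // e.g. burst->releaseMemoryResource(slot)
    }
}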
diff --git a/security/keymint/aidl/android/hardware/security/keymint/ProtectedData.aidl b/security/keymint/aidl/android/hardware/security/keymint/ProtectedData.aidl
index cfbf171..6db58f2 100644
--- a/security/keymint/aidl/android/hardware/security/keymint/ProtectedData.aidl
+++ b/security/keymint/aidl/android/hardware/security/keymint/ProtectedData.aidl
@@ -18,13 +18,20 @@
/**
* ProtectedData contains the encrypted BCC and the ephemeral MAC key used to
- * authenticate the keysToSign (see keysToSignMac output argument).
+ * authenticate the keysToSign (see keysToSignMac output argument of
+ * IRemotelyProvisionedComponent.generateCertificateRequest).
* @hide
*/
@VintfStability
parcelable ProtectedData {
/**
- * ProtectedData is a COSE_Encrypt structure, specified by the following CDDL
+ * ProtectedData is a COSE_Encrypt structure, encrypted with an AES key that is agreed upon
+ * using Elliptic-curve Diffie-Hellman. The contents of the structure are specified by the
+ * following CDDL [RFC8610].
+ *
+ * Notes:
+ * - None of the CBOR in ProtectedData uses CBOR tags. If an implementation includes
+ * tags, parsers may reject the data.
*
* ProtectedData = [ // COSE_Encrypt
* protected: bstr .cbor {
@@ -34,13 +41,18 @@
* 5 : bstr .size 12 // IV
* },
* ciphertext: bstr, // AES-GCM-256(K, .cbor ProtectedDataPayload)
+ * // Where the encryption key 'K' is derived as follows:
+ * // ikm = ECDH(EEK_pub, Ephemeral_priv)
+ * // salt = null
+ * // info = .cbor Context (see below)
+ * // K = HKDF-SHA-256(ikm, salt, info)
* recipients : [
* [ // COSE_Recipient
* protected : bstr .cbor {
* 1 : -25 // Algorithm : ECDH-ES + HKDF-256
* },
* unprotected : {
- * -1 : PubKeyX25519 / PubKeyEcdhP256 // Of the sender
+ * -1 : PubKeyX25519 / PubKeyEcdhP256 // Ephemeral_pub
* 4 : bstr, // KID : EEK ID
* },
* ciphertext : nil
@@ -48,14 +60,14 @@
* ]
* ]
*
- * K = HKDF-256(ECDH(EEK_pub, Ephemeral_priv), Context)
- *
- * Context = [ // COSE_KDF_Context
+ * // The COSE_KDF_Context that is used to derive the ProtectedData encryption key with
+ * // HKDF. See details on use in ProtectedData comments above.
+ * Context = [
* AlgorithmID : 3 // AES-GCM 256
* PartyUInfo : [
* identity : bstr "client"
* nonce : bstr .size 0,
- * other : bstr // Ephemeral pubkey
+ * other : bstr // Ephemeral_pub
* ],
* PartyVInfo : [
* identity : bstr "server",
@@ -68,41 +80,55 @@
* ]
* ]
*
+ * // The data that is encrypted and included in ProtectedData ciphertext (see above).
* ProtectedDataPayload [
* SignedMac,
* Bcc,
* ? AdditionalDKSignatures,
* ]
+ *
+ * // AdditionalDKSignatures allows the platform to provide additional certifications
+ * // for the DK_pub. For example, this could be provided by the hardware vendor, who
+ * // certifies all of their devices. The SignerName is a free-form string describing
+ * // who generated the signature.
* AdditionalDKSignatures = {
* + SignerName => DKCertChain
* }
*
+ * // SignerName is a string identifier that indicates both the signing authority and
+ * // the format of the DKCertChain.

* SignerName = tstr
*
* DKCertChain = [
- * 2* Certificate // Root -> Leaf. Root is the vendor
- * // self-signed cert, leaf contains DK_pub
+ * 2* Certificate // Root -> ... -> Leaf. "Root" is the vendor self-signed
+ * // cert, "Leaf" contains DK_pub. There may also be
+ * // intermediate certificates between Root and Leaf.
* ]
*
- * Certificate = COSE_Sign1 of a public key
+ * // Certificates may be either:
+ * // 1. COSE_Sign1, with payload containing PubKeyEd25519 or PubKeyECDSA256
+ * // 2. a bstr containing a DER-encoded X.509 certificate (RSA, NIST P-curve, or edDSA)
+ * Certificate = COSE_Sign1 / bstr
*
- * SignedMac = [ // COSE_Sign1
- * bstr .cbor { // Protected params
- * 1 : AlgorithmEdDSA / AlgorithmES256, // Algorithm
+ * // The SignedMac, which authenticates the MAC key that is used to authenticate the
+ * // keysToSign.
+ * SignedMac = [ // COSE_Sign1
+ * bstr .cbor { // Protected params
+ * 1 : AlgorithmEdDSA / AlgorithmES256, // Algorithm
* },
- * {}, // Unprotected params
- * bstr .size 32, // MAC key
+ * {}, // Unprotected params
+ * bstr .size 32, // Payload: MAC key
* bstr // PureEd25519(KM_priv, bstr .cbor SignedMac_structure) /
* // ECDSA(KM_priv, bstr .cbor SignedMac_structure)
* ]
*
- * SignedMac_structure = [
+ * SignedMac_structure = [ // COSE Sig_structure
* "Signature1",
- * bstr .cbor { // Protected params
- * 1 : AlgorithmEdDSA / AlgorithmES256, // Algorithm
+ * bstr .cbor { // Protected params
+ * 1 : AlgorithmEdDSA / AlgorithmES256, // Algorithm
* },
- * bstr .cbor SignedMacAad
- * bstr .size 32 // MAC key
+ * bstr .cbor SignedMacAad,
+ * bstr .size 32 // MAC key
* ]
*
* SignedMacAad = [
@@ -114,31 +140,48 @@
* // the signature.
* ]
*
+ * VerifiedDeviceInfo = DeviceInfo // See DeviceInfo.aidl
+ *
+ * // The BCC is the boot certificate chain, containing measurements about the device
+ * // boot chain. The BCC generally follows the Open Profile for DICE specification at
+ * // https://pigweed.googlesource.com/open-dice/+/HEAD/docs/specification.md.
+ * //
+ * // The first entry in the Bcc is the DK_pub, encoded as a COSE_key. All entries after
+ * // the first describe a link in the boot chain (e.g. bootloaders: BL1, BL2, ... BLN).
+ * // Note that there is no BccEntry for DK_pub, only a "bare" COSE_key.
* Bcc = [
* PubKeyEd25519 / PubKeyECDSA256, // DK_pub
* + BccEntry, // Root -> leaf (KM_pub)
* ]
*
- * BccPayload = { // CWT
- * 1 : tstr, // Issuer
- * 2 : tstr, // Subject
- * // See the Open Profile for DICE for details on these fields.
- * ? -4670545 : bstr, // Code Hash
- * ? -4670546 : bstr, // Code Descriptor
- * ? -4670547 : bstr, // Configuration Hash
- * ? -4670548 : bstr .cbor { // Configuration Descriptor
- * ? -70002 : tstr, // Component name
- * ? -70003 : int, // Firmware version
- * ? -70004 : null, // Resettable
- * },
- * ? -4670549 : bstr, // Authority Hash
- * ? -4670550 : bstr, // Authority Descriptor
- * ? -4670551 : bstr, // Mode
+ * // This is the signed payload for each entry in the Bcc. Note that the "Configuration
+ * // Input Values" described by the Open Profile are not used here. Instead, the Bcc
+ * // defines its own configuration values for the Configuration Descriptor field. See
+ * // the Open Profile for DICE for more details on the fields. All hashes are SHA256.
+ * BccPayload = { // CWT [RFC8392]
+ * 1 : tstr, // Issuer
+ * 2 : tstr, // Subject
* -4670552 : bstr .cbor PubKeyEd25519 /
- * bstr .cbor PubKeyECDSA256 // Subject Public Key
- * -4670553 : bstr // Key Usage
+ * bstr .cbor PubKeyECDSA256, // Subject Public Key
+ * -4670553 : bstr // Key Usage
+ *
+ * // NOTE: All of the following fields may be omitted for a "Degenerate BCC", as
+ * // described by IRemotelyProvisionedComponent.aidl.
+ * -4670545 : bstr, // Code Hash
+ * ? -4670546 : bstr, // Code Descriptor
+ * ? -4670547 : bstr, // Configuration Hash
+ * -4670548 : bstr .cbor { // Configuration Descriptor
+ * ? -70002 : tstr, // Component name
+ * ? -70003 : int, // Firmware version
+ * ? -70004 : null, // Resettable
+ * },
+ * -4670549 : bstr, // Authority Hash
+ * ? -4670550 : bstr, // Authority Descriptor
+ * -4670551 : bstr, // Mode
* }
*
+ * // Each entry in the Bcc is a BccPayload signed by the key from the previous entry
+ * // in the Bcc array.
* BccEntry = [ // COSE_Sign1 (untagged)
* protected : bstr .cbor {
* 1 : AlgorithmEdDSA / AlgorithmES256, // Algorithm
@@ -159,8 +202,8 @@
* payload: bstr .cbor BccPayload
* ]
*
- * VerifiedDeviceInfo = DeviceInfo // See DeviceInfo.aidl
- *
+ * // The following section defines some types that are reused throughout the above
+ * // data structures.
* PubKeyX25519 = { // COSE_Key
* 1 : 1, // Key type : Octet Key Pair
* -1 : 4, // Curve : X25519
@@ -168,25 +211,25 @@
* }
*
* PubKeyEd25519 = { // COSE_Key
- * 1 : 1, // Key type : octet key pair
- * 3 : AlgorithmEdDSA, // Algorithm : EdDSA
- * -1 : 6, // Curve : Ed25519
- * -2 : bstr // X coordinate, little-endian
+ * 1 : 1, // Key type : octet key pair
+ * 3 : AlgorithmEdDSA, // Algorithm : EdDSA
+ * -1 : 6, // Curve : Ed25519
+ * -2 : bstr // X coordinate, little-endian
* }
*
- * PubKeyEcdhP256 = { // COSE_Key
- * 1 : 2, // Key type : EC2
- * -1 : 1, // Curve : P256
- * -2 : bstr // Sender X coordinate
- * -3 : bstr // Sender Y coordinate
+ * PubKeyEcdhP256 = { // COSE_Key
+ * 1 : 2, // Key type : EC2
+ * -1 : 1, // Curve : P256
+ * -2 : bstr // Sender X coordinate
+ * -3 : bstr // Sender Y coordinate
* }
*
- * PubKeyECDSA256 = { // COSE_Key
- * 1 : 2, // Key type : EC2
- * 3 : AlgorithmES256, // Algorithm : ECDSA w/ SHA-256
- * -1 : 1, // Curve: P256
- * -2 : bstr, // X coordinate
- * -3 : bstr // Y coordinate
+ * PubKeyECDSA256 = { // COSE_Key
+ * 1 : 2, // Key type : EC2
+ * 3 : AlgorithmES256, // Algorithm : ECDSA w/ SHA-256
+ * -1 : 1, // Curve: P256
+ * -2 : bstr, // X coordinate
+ * -3 : bstr // Y coordinate
* }
*
* AlgorithmES256 = -7
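Editor's note: the expanded comments above spell out the derivation K = HKDF-SHA-256(ikm = ECDH(EEK_pub, Ephemeral_priv), salt = null, info = .cbor Context). A sketch of that derivation for the X25519 case, assuming BoringSSL's X25519() and HKDF() helpers are available; cborContext stands for the already-encoded ".cbor Context" bytes and is taken as an input here rather than built up:

// Illustrative sketch only; not part of the patch.
#include <openssl/curve25519.h>
#include <openssl/digest.h>
#include <openssl/hkdf.h>
#include <array>
#include <cstdint>
#include <vector>

// Derives the AES-GCM-256 key that protects ProtectedDataPayload.
bool deriveProtectedDataKey(const std::array<uint8_t, 32>& eekPub,
                            const std::array<uint8_t, 32>& ephemeralPriv,
                            const std::vector<uint8_t>& cborContext,
                            std::array<uint8_t, 32>* outKey) {
    uint8_t ikm[32];
    // ikm = ECDH(EEK_pub, Ephemeral_priv)
    if (X25519(ikm, ephemeralPriv.data(), eekPub.data()) != 1) return false;
    // K = HKDF-SHA-256(ikm, salt = null, info = .cbor Context)
    return HKDF(outKey->data(), outKey->size(), EVP_sha256(),
                ikm, sizeof(ikm),
                /*salt=*/nullptr, /*salt_len=*/0,
                cborContext.data(), cborContext.size()) == 1;
}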
diff --git a/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl b/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl
index b28ebcb..42dfad5 100644
--- a/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl
+++ b/security/keymint/aidl/android/hardware/security/keymint/Tag.aidl
@@ -504,7 +504,9 @@
* that is necessary during all uses of the key. In particular, calls to exportKey() and
* getKeyCharacteristics() must provide the same value to the clientId parameter, and calls to
* begin() must provide this tag and the same associated data as part of the inParams set. If
- * the correct data is not provided, the method must return ErrorCode::INVALID_KEY_BLOB.
+ * the correct data is not provided, the method must return ErrorCode::INVALID_KEY_BLOB. Note
+ * that a key with a zero-length APPLICATION_ID cannot have its key characteristics retrieved
+ * using getKeyCharacteristics() due to a historical limitation of the API.
*
* The content of this tag must be bound to the key cryptographically, meaning it must not be
* possible for an adversary who has access to all of the secure world secrets but does not have
@@ -525,7 +527,9 @@
* that is necessary during all uses of the key. In particular, calls to begin() and
* exportKey() must provide the same value to the appData parameter, and calls to begin must
* provide this tag and the same associated data as part of the inParams set. If the correct
- * data is not provided, the method must return ErrorCode::INVALID_KEY_BLOB.
+ * data is not provided, the method must return ErrorCode::INVALID_KEY_BLOB. Note that a key
+ * with a zero-length APPLICATION_DATA cannot have its key characteristics retrieved using
+ * getKeyCharacteristics() due to a historical limitation of the API.
*
* The content of this tag must be bound to the key cryptographically, meaning it must not be
* possible for an adversary who has access to all of the secure world secrets but does not have
diff --git a/usb/aidl/Android.bp b/usb/aidl/Android.bp
index d1e9e68..f71cacb 100644
--- a/usb/aidl/Android.bp
+++ b/usb/aidl/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
aidl_interface {
name: "android.hardware.usb",
vendor_available: true,
diff --git a/usb/aidl/default/Android.bp b/usb/aidl/default/Android.bp
index da0cff2..7cb2822 100644
--- a/usb/aidl/default/Android.bp
+++ b/usb/aidl/default/Android.bp
@@ -14,6 +14,15 @@
// limitations under the License.
//
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
cc_binary {
name: "android.hardware.usb-service.example",
relative_install_path: "hw",