Merge "uwb(hal): Set sdk to Tiramisu"
diff --git a/automotive/evs/aidl/Android.bp b/automotive/evs/aidl/Android.bp
new file mode 100644
index 0000000..3c0aa13
--- /dev/null
+++ b/automotive/evs/aidl/Android.bp
@@ -0,0 +1,51 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "hardware_interfaces_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+aidl_interface {
+    name: "android.hardware.automotive.evs",
+    vendor_available: true,
+    srcs: [
+        "android/hardware/automotive/evs/*.aidl",
+    ],
+    stability: "vintf",
+    imports: [
+        "android.hardware.common-V2",
+        "android.hardware.graphics.common-V3",
+    ],
+    backend: {
+        java: {
+            // The android.hardware.graphics.common package is not enabled
+            // for the Java backend.
+            enabled: false,
+        },
+        cpp: {
+            enabled: false,
+        },
+        ndk: {
+            vndk: {
+                enabled: false,
+            },
+            min_sdk_version: "29",
+        },
+    },
+}
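
Since only the NDK backend is enabled above, a native client would link against the generated library (typically android.hardware.automotive.evs-V1-ndk) and reach the HAL through libbinder_ndk. A minimal connection sketch; the "/default" instance name is an assumption, the device's VINTF manifest is authoritative:

    // Hedged sketch: locate the EVS enumerator via the generated NDK backend.
    #include <aidl/android/hardware/automotive/evs/IEvsEnumerator.h>
    #include <android/binder_auto_utils.h>
    #include <android/binder_manager.h>

    #include <memory>
    #include <string>

    using aidl::android::hardware::automotive::evs::IEvsEnumerator;

    std::shared_ptr<IEvsEnumerator> connectToEnumerator() {
        // Instance name is assumed; check the manifest for the real one.
        const std::string instance =
                std::string(IEvsEnumerator::descriptor) + "/default";
        ndk::SpAIBinder binder(AServiceManager_waitForService(instance.c_str()));
        return IEvsEnumerator::fromBinder(binder);  // nullptr if the binder is not this interface
    }
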
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/BufferDesc.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/BufferDesc.aidl
new file mode 100644
index 0000000..31acdb8
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/BufferDesc.aidl
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable BufferDesc {
+ android.hardware.graphics.common.HardwareBuffer buffer;
+ int pixelSizeBytes;
+ int bufferId;
+ @utf8InCpp String deviceId;
+ long timestamp;
+ byte[] metadata;
+}
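
On the NDK backend this parcelable maps to a plain C++ struct, so a frame receiver can read the geometry carried in the embedded android.hardware.graphics.common.HardwareBuffer directly. A small, purely illustrative sketch (converting the handle into an AHardwareBuffer is device glue and omitted here):

    #include <aidl/android/hardware/automotive/evs/BufferDesc.h>

    #include <cinttypes>
    #include <cstdio>

    using aidl::android::hardware::automotive::evs::BufferDesc;

    // Logs the essential geometry of a received frame.
    void describeFrame(const BufferDesc& desc) {
        const auto& d = desc.buffer.description;  // HardwareBufferDescription
        std::printf("buffer %d from %s: %ux%u stride=%u ts=%" PRId64 "\n",
                    desc.bufferId, desc.deviceId.c_str(),
                    static_cast<unsigned>(d.width), static_cast<unsigned>(d.height),
                    static_cast<unsigned>(d.stride), desc.timestamp);
    }
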
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/CameraDesc.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/CameraDesc.aidl
new file mode 100644
index 0000000..4dadeb8
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/CameraDesc.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable CameraDesc {
+ @utf8InCpp String id;
+ int vendorFlags;
+ byte[] metadata;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/CameraParam.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/CameraParam.aidl
new file mode 100644
index 0000000..ae4ce77
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/CameraParam.aidl
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum CameraParam {
+ BRIGHTNESS = 0,
+ CONTRAST = 1,
+ AUTOGAIN = 2,
+ GAIN = 3,
+ AUTO_WHITE_BALANCE = 4,
+ WHITE_BALANCE_TEMPERATURE = 5,
+ SHARPNESS = 6,
+ AUTO_EXPOSURE = 7,
+ ABSOLUTE_EXPOSURE = 8,
+ ABSOLUTE_FOCUS = 9,
+ AUTO_FOCUS = 10,
+ ABSOLUTE_ZOOM = 11,
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DeviceStatus.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DeviceStatus.aidl
new file mode 100644
index 0000000..cc066ac
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DeviceStatus.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable DeviceStatus {
+ @utf8InCpp String id;
+ android.hardware.automotive.evs.DeviceStatusType status;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DeviceStatusType.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DeviceStatusType.aidl
new file mode 100644
index 0000000..d0f1d8e
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DeviceStatusType.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum DeviceStatusType {
+ CAMERA_AVAILABLE = 0,
+ CAMERA_NOT_AVAILABLE = 1,
+ DISPLAY_AVAILABLE = 2,
+ DISPLAY_NOT_AVAILABLE = 3,
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DisplayDesc.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DisplayDesc.aidl
new file mode 100644
index 0000000..4ac029e
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DisplayDesc.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable DisplayDesc {
+ @utf8InCpp String id;
+ int width;
+ int height;
+ android.hardware.automotive.evs.Rotation orientation;
+ int vendorFlags;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DisplayState.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DisplayState.aidl
new file mode 100644
index 0000000..a5f4309
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/DisplayState.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum DisplayState {
+ NOT_OPEN = 0,
+ NOT_VISIBLE = 1,
+ VISIBLE_ON_NEXT_FRAME = 2,
+ VISIBLE = 3,
+ DEAD = 4,
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsEventDesc.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsEventDesc.aidl
new file mode 100644
index 0000000..09b2b9d
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsEventDesc.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable EvsEventDesc {
+ android.hardware.automotive.evs.EvsEventType aType;
+ @utf8InCpp String deviceId;
+ int[] payload;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsEventType.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsEventType.aidl
new file mode 100644
index 0000000..052a6b3
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsEventType.aidl
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum EvsEventType {
+ STREAM_STARTED = 0,
+ STREAM_STOPPED = 1,
+ FRAME_DROPPED = 2,
+ TIMEOUT = 3,
+ PARAMETER_CHANGED = 4,
+ MASTER_RELEASED = 5,
+ STREAM_ERROR = 6,
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsResult.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsResult.aidl
new file mode 100644
index 0000000..a0418a9
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/EvsResult.aidl
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum EvsResult {
+ OK = 0,
+ INVALID_ARG = 1,
+ STREAM_ALREADY_RUNNING = 2,
+ BUFFER_NOT_AVAILABLE = 3,
+ OWNERSHIP_LOST = 4,
+ UNDERLYING_SERVICE_ERROR = 5,
+ PERMISSION_DENIED = 6,
+ RESOURCE_NOT_AVAILABLE = 7,
+ RESOURCE_BUSY = 8,
+ NOT_IMPLEMENTED = 9,
+ NOT_SUPPORTED = 10,
+}
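
These values usually reach NDK-backend callers as service-specific errors on ndk::ScopedAStatus, so a client can recover the enum from a failed call. A sketch of that pattern, assuming the implementation reports failures this way:

    #include <aidl/android/hardware/automotive/evs/EvsResult.h>
    #include <android/binder_auto_utils.h>
    #include <android/binder_status.h>

    using aidl::android::hardware::automotive::evs::EvsResult;

    // Maps a binder status back onto EvsResult; transport-level failures are
    // folded into UNDERLYING_SERVICE_ERROR for simplicity.
    EvsResult toEvsResult(const ndk::ScopedAStatus& status) {
        if (status.isOk()) return EvsResult::OK;
        if (status.getExceptionCode() == EX_SERVICE_SPECIFIC) {
            return static_cast<EvsResult>(status.getServiceSpecificError());
        }
        return EvsResult::UNDERLYING_SERVICE_ERROR;
    }
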
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsCamera.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsCamera.aidl
new file mode 100644
index 0000000..ce1b97d
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsCamera.aidl
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsCamera {
+ void doneWithFrame(in android.hardware.automotive.evs.BufferDesc[] buffer);
+ void forcePrimaryClient(in android.hardware.automotive.evs.IEvsDisplay display);
+ android.hardware.automotive.evs.CameraDesc getCameraInfo();
+ byte[] getExtendedInfo(in int opaqueIdentifier);
+ int[] getIntParameter(in android.hardware.automotive.evs.CameraParam id);
+ android.hardware.automotive.evs.ParameterRange getIntParameterRange(in android.hardware.automotive.evs.CameraParam id);
+ android.hardware.automotive.evs.CameraParam[] getParameterList();
+ android.hardware.automotive.evs.CameraDesc getPhysicalCameraInfo(in String deviceId);
+ int importExternalBuffers(in android.hardware.automotive.evs.BufferDesc[] buffers);
+ void pauseVideoStream();
+ void resumeVideoStream();
+ void setExtendedInfo(in int opaqueIdentifier, in byte[] opaqueValue);
+ int[] setIntParameter(in android.hardware.automotive.evs.CameraParam id, in int value);
+ void setPrimaryClient();
+ void setMaxFramesInFlight(in int bufferCount);
+ void startVideoStream(in android.hardware.automotive.evs.IEvsCameraStream receiver);
+ void stopVideoStream();
+ void unsetPrimaryClient();
+}
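
getParameterList, getIntParameterRange, and setIntParameter together form the camera-control surface; a client is expected to clamp requested values to the advertised range before applying them. A hedged sketch of that flow using the generated NDK proxies:

    #include <aidl/android/hardware/automotive/evs/IEvsCamera.h>

    #include <algorithm>
    #include <memory>
    #include <vector>

    using namespace aidl::android::hardware::automotive::evs;

    // Tries to set BRIGHTNESS, clamped to the range the camera advertises.
    // Returns false if the camera does not expose the parameter at all.
    bool setBrightness(const std::shared_ptr<IEvsCamera>& camera, int32_t requested) {
        std::vector<CameraParam> supported;
        if (!camera->getParameterList(&supported).isOk()) return false;
        if (std::find(supported.begin(), supported.end(), CameraParam::BRIGHTNESS) ==
            supported.end()) {
            return false;
        }

        ParameterRange range;
        if (!camera->getIntParameterRange(CameraParam::BRIGHTNESS, &range).isOk()) {
            return false;
        }
        const int32_t value = std::clamp(requested, range.min, range.max);

        std::vector<int32_t> effective;  // values actually applied, one per device
        return camera->setIntParameter(CameraParam::BRIGHTNESS, value, &effective).isOk();
    }
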
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsCameraStream.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsCameraStream.aidl
new file mode 100644
index 0000000..6e2e64a
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsCameraStream.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsCameraStream {
+ oneway void deliverFrame(in android.hardware.automotive.evs.BufferDesc[] buffer);
+ oneway void notify(in android.hardware.automotive.evs.EvsEventDesc event);
+}
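
The camera pushes frames into this callback, so a client implements the generated BnEvsCameraStream and hands every BufferDesc back through IEvsCamera::doneWithFrame once it is consumed; both methods are oneway and should not block. A minimal receiver sketch with error handling trimmed:

    #include <aidl/android/hardware/automotive/evs/BnEvsCameraStream.h>
    #include <aidl/android/hardware/automotive/evs/IEvsCamera.h>

    #include <memory>
    #include <utility>
    #include <vector>

    namespace evs = aidl::android::hardware::automotive::evs;

    // Consumes frames and immediately returns them to keep the buffer pool full.
    class FrameSink : public evs::BnEvsCameraStream {
      public:
        explicit FrameSink(std::shared_ptr<evs::IEvsCamera> camera)
            : mCamera(std::move(camera)) {}

        ndk::ScopedAStatus deliverFrame(
                const std::vector<evs::BufferDesc>& buffers) override {
            // ... render or analyze the buffers here ...
            return mCamera->doneWithFrame(buffers);  // hand every buffer back
        }

        ndk::ScopedAStatus notify(const evs::EvsEventDesc& event) override {
            (void)event;  // STREAM_STOPPED and friends arrive here
            return ndk::ScopedAStatus::ok();
        }

      private:
        std::shared_ptr<evs::IEvsCamera> mCamera;
    };

    // Usage: auto sink = ndk::SharedRefBase::make<FrameSink>(camera);
    //        camera->startVideoStream(sink);
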
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsDisplay.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsDisplay.aidl
new file mode 100644
index 0000000..9b538d4
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsDisplay.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsDisplay {
+ android.hardware.automotive.evs.DisplayDesc getDisplayInfo();
+ android.hardware.automotive.evs.DisplayState getDisplayState();
+ android.hardware.automotive.evs.BufferDesc getTargetBuffer();
+ void returnTargetBufferForDisplay(in android.hardware.automotive.evs.BufferDesc buffer);
+ void setDisplayState(in android.hardware.automotive.evs.DisplayState state);
+}
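
The display contract is a simple ping-pong: fetch the target buffer, draw into it, and return it, which is what actually queues it for presentation; setDisplayState gates visibility. A sketch of one frame of that loop, with the rendering step left as a placeholder:

    #include <aidl/android/hardware/automotive/evs/IEvsDisplay.h>

    #include <memory>

    namespace evs = aidl::android::hardware::automotive::evs;

    // Presents a single frame; the actual drawing is device specific.
    bool presentOneFrame(const std::shared_ptr<evs::IEvsDisplay>& display) {
        if (!display->setDisplayState(evs::DisplayState::VISIBLE_ON_NEXT_FRAME).isOk()) {
            return false;
        }

        evs::BufferDesc target;
        if (!display->getTargetBuffer(&target).isOk()) return false;

        // drawInto(target);  // fill the buffer via gralloc/GL (placeholder)

        // Returning the buffer queues it for display.
        return display->returnTargetBufferForDisplay(target).isOk();
    }
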
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsEnumerator.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsEnumerator.aidl
new file mode 100644
index 0000000..a79c68d
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsEnumerator.aidl
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsEnumerator {
+ void closeCamera(in android.hardware.automotive.evs.IEvsCamera carCamera);
+ void closeDisplay(in android.hardware.automotive.evs.IEvsDisplay display);
+ void closeUltrasonicsArray(in android.hardware.automotive.evs.IEvsUltrasonicsArray evsUltrasonicsArray);
+ android.hardware.automotive.evs.CameraDesc[] getCameraList();
+ byte[] getDisplayIdList();
+ android.hardware.automotive.evs.DisplayState getDisplayState();
+ android.hardware.automotive.evs.Stream[] getStreamList(in android.hardware.automotive.evs.CameraDesc description);
+ android.hardware.automotive.evs.UltrasonicsArrayDesc[] getUltrasonicsArrayList();
+ boolean isHardware();
+ android.hardware.automotive.evs.IEvsCamera openCamera(in String cameraId, in android.hardware.automotive.evs.Stream streamCfg);
+ android.hardware.automotive.evs.IEvsDisplay openDisplay(in byte id);
+ android.hardware.automotive.evs.IEvsUltrasonicsArray openUltrasonicsArray(in String ultrasonicsArrayId);
+ void registerStatusCallback(in android.hardware.automotive.evs.IEvsEnumeratorStatusCallback callback);
+}
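
A typical client session starts at the enumerator: list the cameras, pick a stream configuration advertised for the chosen camera, and open it. A hedged sketch of that sequence; it simply takes the first camera and the first configuration, whereas a real client would filter by format and usage:

    #include <aidl/android/hardware/automotive/evs/IEvsEnumerator.h>

    #include <memory>
    #include <vector>

    namespace evs = aidl::android::hardware::automotive::evs;

    // Opens the first reported camera with its first advertised configuration.
    std::shared_ptr<evs::IEvsCamera> openFirstCamera(
            const std::shared_ptr<evs::IEvsEnumerator>& enumerator) {
        std::vector<evs::CameraDesc> cameras;
        if (!enumerator->getCameraList(&cameras).isOk() || cameras.empty()) {
            return nullptr;
        }

        std::vector<evs::Stream> configs;
        if (!enumerator->getStreamList(cameras[0], &configs).isOk() || configs.empty()) {
            return nullptr;
        }

        std::shared_ptr<evs::IEvsCamera> camera;
        enumerator->openCamera(cameras[0].id, configs[0], &camera);
        return camera;  // nullptr on failure
    }
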
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsEnumeratorStatusCallback.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsEnumeratorStatusCallback.aidl
new file mode 100644
index 0000000..c39a4e8
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsEnumeratorStatusCallback.aidl
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsEnumeratorStatusCallback {
+ oneway void deviceStatusChanged(in android.hardware.automotive.evs.DeviceStatus[] status);
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsUltrasonicsArray.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsUltrasonicsArray.aidl
new file mode 100644
index 0000000..1183ab3
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsUltrasonicsArray.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsUltrasonicsArray {
+ void doneWithDataFrame(in android.hardware.automotive.evs.UltrasonicsDataFrameDesc dataFrameDesc);
+ android.hardware.automotive.evs.UltrasonicsArrayDesc getUltrasonicArrayInfo();
+ void setMaxFramesInFlight(in int bufferCount);
+ void startStream(in android.hardware.automotive.evs.IEvsUltrasonicsArrayStream stream);
+ void stopStream();
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.aidl
new file mode 100644
index 0000000..510b0a4
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+interface IEvsUltrasonicsArrayStream {
+ oneway void deliverDataFrame(in android.hardware.automotive.evs.UltrasonicsDataFrameDesc dataFrameDesc);
+ oneway void notify(in android.hardware.automotive.evs.EvsEventDesc event);
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/ParameterRange.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/ParameterRange.aidl
new file mode 100644
index 0000000..44e9b59
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/ParameterRange.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable ParameterRange {
+ int min;
+ int max;
+ int step;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Rotation.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Rotation.aidl
new file mode 100644
index 0000000..91971fc
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Rotation.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum Rotation {
+ ROTATION_0 = 0,
+ ROTATION_90 = 1,
+ ROTATION_180 = 2,
+ ROTATION_270 = 3,
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/RotationQuaternion.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/RotationQuaternion.aidl
new file mode 100644
index 0000000..d9c8b6e
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/RotationQuaternion.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable RotationQuaternion {
+ float x;
+ float y;
+ float z;
+ float w;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/SensorPose.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/SensorPose.aidl
new file mode 100644
index 0000000..4ead9ea
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/SensorPose.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable SensorPose {
+ android.hardware.automotive.evs.RotationQuaternion rotation;
+ android.hardware.automotive.evs.Translation translation;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Stream.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Stream.aidl
new file mode 100644
index 0000000..a780412
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Stream.aidl
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable Stream {
+ int id;
+ android.hardware.automotive.evs.StreamType streamType;
+ int width;
+ int height;
+ android.hardware.graphics.common.PixelFormat format;
+ android.hardware.graphics.common.BufferUsage usage;
+ android.hardware.automotive.evs.Rotation rotation;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/StreamType.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/StreamType.aidl
new file mode 100644
index 0000000..9819c89
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/StreamType.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@Backing(type="int") @VintfStability
+enum StreamType {
+ OUTPUT = 0,
+ INPUT = 1,
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Translation.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Translation.aidl
new file mode 100644
index 0000000..488d80f
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/Translation.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable Translation {
+ float x;
+ float y;
+ float z;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicSensor.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicSensor.aidl
new file mode 100644
index 0000000..23f81f8
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicSensor.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable UltrasonicSensor {
+ android.hardware.automotive.evs.SensorPose pose;
+ float maxRangeMm;
+ float angleOfMeasurement;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicsArrayDesc.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicsArrayDesc.aidl
new file mode 100644
index 0000000..4a98875
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicsArrayDesc.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable UltrasonicsArrayDesc {
+ @utf8InCpp String ultrasonicsArrayId;
+ int maxReadingsPerSensorCount;
+ int maxReceiversCount;
+ android.hardware.automotive.evs.UltrasonicSensor[] sensors;
+}
diff --git a/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.aidl b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.aidl
new file mode 100644
index 0000000..35ec84b
--- /dev/null
+++ b/automotive/evs/aidl/aidl_api/android.hardware.automotive.evs/current/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.aidl
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.automotive.evs;
+@VintfStability
+parcelable UltrasonicsDataFrameDesc {
+ long timestampNs;
+ int id;
+ byte[] transmittersIdList;
+ byte[] receiversIdList;
+ int[] receiversReadingsCountList;
+ android.hardware.common.Ashmem waveformsData;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/BufferDesc.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/BufferDesc.aidl
new file mode 100644
index 0000000..0604abe
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/BufferDesc.aidl
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.graphics.common.HardwareBuffer;
+
+/**
+ * Structure representing an image buffer through our APIs
+ *
+ * In addition to the handle to the graphics memory, we need to retain
+ * the properties of the buffer for easy reference and reconstruction of
+ * an ANativeWindowBuffer object on the remote side of API calls.
+ * (Not least because OpenGL expects an ANativeWindowBuffer* for use as a
+ * texture via eglCreateImageKHR()).
+ */
+@VintfStability
+parcelable BufferDesc {
+ /**
+ * Stable AIDL counterpart of AHardwareBuffer. Please see
+ * hardware/interfaces/graphics/common/aidl/android/hardware/graphics/common/HardwareBuffer.aidl
+ * for more details.
+ */
+ HardwareBuffer buffer;
+ /**
+ * The size of a pixel in the units of bytes.
+ */
+ int pixelSizeBytes;
+ /**
+ * Opaque value from driver
+ */
+ int bufferId;
+ /**
+ * Unique identifier of the physical camera device that produces this buffer.
+ */
+ @utf8InCpp
+ String deviceId;
+ /**
+ * Time that this buffer is being filled, in the units of nanoseconds, and must be
+ * obtained from android::elapsedRealtimeNanos() or its equivalents.
+ */
+ long timestamp;
+ /**
+ * Frame metadata. This is opaque to EvsManager.
+ */
+ byte[] metadata;
+}
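
For reference, a minimal C++ sketch (not part of this patch) of how a HAL implementation might populate a BufferDesc from an AHardwareBuffer before delivering it. The helper name makeBufferDesc, the libaidlcommonsupport dupToAidl() conversion, and the 4-bytes-per-pixel value are assumptions for illustration.

    #include <aidl/android/hardware/automotive/evs/BufferDesc.h>
    #include <aidlcommonsupport/NativeHandle.h>  // android::dupToAidl(); assumed available
    #include <android/hardware_buffer.h>
    #include <utils/SystemClock.h>

    #include <string>

    namespace evs = aidl::android::hardware::automotive::evs;
    namespace gfx = aidl::android::hardware::graphics::common;

    // Hypothetical helper: describe the graphics buffer and copy its properties over.
    evs::BufferDesc makeBufferDesc(AHardwareBuffer* gfxBuffer, int32_t bufferId,
                                   const std::string& deviceId) {
        AHardwareBuffer_Desc desc{};
        AHardwareBuffer_describe(gfxBuffer, &desc);

        evs::BufferDesc out;
        out.buffer.description.width = static_cast<int32_t>(desc.width);
        out.buffer.description.height = static_cast<int32_t>(desc.height);
        out.buffer.description.layers = static_cast<int32_t>(desc.layers);
        out.buffer.description.format = static_cast<gfx::PixelFormat>(desc.format);
        out.buffer.description.usage = static_cast<gfx::BufferUsage>(desc.usage);
        out.buffer.description.stride = static_cast<int32_t>(desc.stride);
        out.buffer.handle = ::android::dupToAidl(AHardwareBuffer_getNativeHandle(gfxBuffer));

        out.pixelSizeBytes = 4;   // assumes a 4-byte format such as RGBA_8888
        out.bufferId = bufferId;  // opaque, driver-chosen value
        out.deviceId = deviceId;  // physical camera that produced the frame
        out.timestamp = ::android::elapsedRealtimeNanos();
        return out;
    }
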
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/CameraDesc.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/CameraDesc.aidl
new file mode 100644
index 0000000..2f500a7
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/CameraDesc.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Structure describing the basic properties of an EVS camera.
+ *
+ * The HAL is responsible for filling out this structure for each
+ * EVS camera in the system.
+ */
+@VintfStability
+parcelable CameraDesc {
+ /**
+ * Unique identifier for camera devices. This may be a path to the detected
+ * camera device; for example, "/dev/video0".
+ */
+ @utf8InCpp
+ String id;
+ /**
+ * Opaque value from driver. Vendor may use this field to store additional
+ * information; for example, sensor and bridge chip id.
+ */
+ int vendorFlags;
+ /**
+ * Store camera metadata such as lens characteristics.
+ */
+ byte[] metadata;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/CameraParam.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/CameraParam.aidl
new file mode 100644
index 0000000..15500b2
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/CameraParam.aidl
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * EVS camera parameter
+ */
+@VintfStability
+@Backing(type="int")
+enum CameraParam {
+ /**
+ * The brightness of image frames
+ */
+ BRIGHTNESS,
+ /**
+ * The contrast of image frames
+ */
+ CONTRAST,
+ /**
+ * Automatic gain/exposure control
+ */
+ AUTOGAIN,
+ /**
+ * Gain control
+ */
+ GAIN,
+ /**
+ * Automatic white balance
+ */
+ AUTO_WHITE_BALANCE,
+ /**
+ * Manual white balance setting as a color temperature in Kelvin.
+ */
+ WHITE_BALANCE_TEMPERATURE,
+ /**
+ * Image sharpness adjustment
+ */
+ SHARPNESS,
+ /**
+ * Auto Exposure Control modes; auto, manual, shutter priority, or
+ * aperture priority.
+ */
+ AUTO_EXPOSURE,
+ /**
+ * Manual exposure time of the camera
+ */
+ ABSOLUTE_EXPOSURE,
+ /**
+ * Sets the focal point of the camera to the specified position. This
+ * parameter may not be effective when auto focus is enabled.
+ */
+ ABSOLUTE_FOCUS,
+ /**
+ * Enables continuous automatic focus adjustments.
+ */
+ AUTO_FOCUS,
+ /**
+ * Specifies the objective lens focal length as an absolute value.
+ */
+ ABSOLUTE_ZOOM,
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/DeviceStatus.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/DeviceStatus.aidl
new file mode 100644
index 0000000..535ace3
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/DeviceStatus.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.DeviceStatusType;
+
+/**
+ * The status of the devices, as sent by the EVS HAL through the
+ * IEvsEnumeratorStatusCallback::deviceStatusChanged() call.
+ */
+@VintfStability
+parcelable DeviceStatus {
+ /**
+ * The identifier of a device that has transitioned to a new status.
+ */
+ @utf8InCpp
+ String id;
+ /**
+ * A new status of this device
+ */
+ DeviceStatusType status;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/DeviceStatusType.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/DeviceStatusType.aidl
new file mode 100644
index 0000000..902b31b
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/DeviceStatusType.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * The status of the devices available through the EVS
+ */
+@VintfStability
+@Backing(type="int")
+enum DeviceStatusType {
+ /**
+ * A camera device is available and ready to be used.
+ */
+ CAMERA_AVAILABLE,
+ /**
+ * A camera device is not available; e.g. disconnected from the system.
+ */
+ CAMERA_NOT_AVAILABLE,
+ /**
+ * A display device is available and ready to be used.
+ */
+ DISPLAY_AVAILABLE,
+ /**
+ * A display device is not available; e.g. disconnected from the
+ * system.
+ */
+ DISPLAY_NOT_AVAILABLE,
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/DisplayDesc.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/DisplayDesc.aidl
new file mode 100644
index 0000000..0b4243b
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/DisplayDesc.aidl
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.Rotation;
+
+/**
+ * Structure describing the basic properties of an EVS display
+ *
+ * The HAL is responsible for filling out this structure to describe
+ * the EVS display. As an implementation detail, this may be a physical
+ * display or a virtual display that is overlaid or mixed with another
+ * presentation device.
+ */
+@VintfStability
+parcelable DisplayDesc {
+ /**
+ * Unique identifier for the display
+ */
+ @utf8InCpp
+ String id;
+ /**
+ * The width of the display
+ */
+ int width;
+ /**
+ * The height of the display
+ */
+ int height;
+ /**
+ * Counterclockwise orientation of the display
+ */
+ Rotation orientation;
+ /**
+ * Opaque value from driver. Vendor may use this field to store additional
+ * information; for example, sensor and bridge chip id.
+ */
+ int vendorFlags;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/DisplayState.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/DisplayState.aidl
new file mode 100644
index 0000000..c242d2f
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/DisplayState.aidl
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * States for control of the EVS display
+ *
+ * These values describe the display lifecycle as seen by its client. The display is
+ * NOT_OPEN until a client requests it, starts in the NOT_VISIBLE state once opened,
+ * is expected to move through VISIBLE_ON_NEXT_FRAME to VISIBLE while video frames
+ * are being presented, and reports DEAD when the driver is in an undefined state and
+ * the interface should be closed.
+ */
+@VintfStability
+@Backing(type="int")
+enum DisplayState {
+ /*
+ * Display has not been requested by any application yet
+ */
+ NOT_OPEN = 0,
+ /*
+ * Display is inhibited
+ */
+ NOT_VISIBLE,
+ /*
+ * Will become visible with next frame
+ */
+ VISIBLE_ON_NEXT_FRAME,
+ /*
+ * Display is currently active
+ */
+ VISIBLE,
+ /*
+ * Driver is in an undefined state. Interface should be closed.
+ */
+ DEAD,
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/EvsEventDesc.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/EvsEventDesc.aidl
new file mode 100644
index 0000000..ebff98f
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/EvsEventDesc.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.EvsEventType;
+
+/**
+ * Structure that describes informative events that occur while EVS is streaming
+ */
+@VintfStability
+parcelable EvsEventDesc {
+ /**
+ * Type of an informative event
+ */
+ EvsEventType aType;
+ /**
+ * Device identifier
+ */
+ @utf8InCpp
+ String deviceId;
+ /**
+ * Possible additional vendor information that is opaque to the EvsManager
+ */
+ int[] payload;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/EvsEventType.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/EvsEventType.aidl
new file mode 100644
index 0000000..3a493af
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/EvsEventType.aidl
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Types of informative streaming events
+ */
+@VintfStability
+@Backing(type="int")
+enum EvsEventType {
+ /**
+ * Video stream is started
+ */
+ STREAM_STARTED = 0,
+ /**
+ * Video stream is stopped
+ */
+ STREAM_STOPPED,
+ /**
+ * Video frame is dropped
+ */
+ FRAME_DROPPED,
+ /**
+ * Timeout happens
+ */
+ TIMEOUT,
+ /**
+ * Camera parameter is changed; payload contains a changed parameter ID and
+ * its value
+ */
+ PARAMETER_CHANGED,
+ /**
+ * The primary client (master) role has become available
+ */
+ MASTER_RELEASED,
+ /**
+ * Any other erroneous streaming events
+ */
+ STREAM_ERROR,
+}
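
As a rough illustration (not part of this patch), a client's notify() handler might dispatch on these event types as follows; the payload interpretation simply follows the PARAMETER_CHANGED comment above, and the logging is illustrative.

    #include <aidl/android/hardware/automotive/evs/EvsEventDesc.h>
    #include <aidl/android/hardware/automotive/evs/EvsEventType.h>
    #include <android-base/logging.h>

    namespace evs = aidl::android::hardware::automotive::evs;

    // Hypothetical helper called from an IEvsCameraStream::notify() implementation.
    void handleEvent(const evs::EvsEventDesc& event) {
        switch (event.aType) {
            case evs::EvsEventType::STREAM_STARTED:
                LOG(INFO) << "Stream started on " << event.deviceId;
                break;
            case evs::EvsEventType::STREAM_STOPPED:
                LOG(INFO) << "Stream stopped; no more frames will arrive";
                break;
            case evs::EvsEventType::PARAMETER_CHANGED:
                // Per the comment above, payload carries the parameter id and its value.
                if (event.payload.size() >= 2) {
                    LOG(INFO) << "Parameter " << event.payload[0]
                              << " changed to " << event.payload[1];
                }
                break;
            case evs::EvsEventType::MASTER_RELEASED:
                LOG(INFO) << "Primary client role is available again";
                break;
            case evs::EvsEventType::FRAME_DROPPED:
            case evs::EvsEventType::TIMEOUT:
            case evs::EvsEventType::STREAM_ERROR:
                LOG(WARNING) << "Streaming problem reported: "
                             << static_cast<int32_t>(event.aType);
                break;
            default:
                break;
        }
    }
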
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/EvsResult.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/EvsResult.aidl
new file mode 100644
index 0000000..c355be3
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/EvsResult.aidl
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Error codes used in EVS HAL interface.
+ */
+@VintfStability
+@Backing(type="int")
+enum EvsResult {
+ OK = 0,
+ /**
+ * Given arguments are invalid
+ */
+ INVALID_ARG,
+ /**
+ * Requested stream is already running
+ */
+ STREAM_ALREADY_RUNNING,
+ /**
+ * Buffer is not available; e.g. failed to allocate
+ */
+ BUFFER_NOT_AVAILABLE,
+ /**
+ * Ownership has expired or been taken over by another client
+ */
+ OWNERSHIP_LOST,
+ /**
+ * A dependent service fails to handle a request
+ */
+ UNDERLYING_SERVICE_ERROR,
+ /**
+ * Permission denied
+ */
+ PERMISSION_DENIED,
+ /**
+ * Either the camera or the display is not available
+ */
+ RESOURCE_NOT_AVAILABLE,
+ /**
+ * Device or resource busy
+ */
+ RESOURCE_BUSY,
+ /**
+ * A method is not implemented yet
+ */
+ NOT_IMPLEMENTED,
+ /**
+ * Requested functionality is not supported
+ */
+ NOT_SUPPORTED,
+}
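
Since these error codes are surfaced to NDK-backend clients as AIDL service-specific errors, a client can map a returned status back to EvsResult along these lines. This is a hedged sketch; treating transport failures as UNDERLYING_SERVICE_ERROR is an arbitrary choice made here.

    #include <aidl/android/hardware/automotive/evs/EvsResult.h>
    #include <android/binder_auto_utils.h>
    #include <android/binder_status.h>

    namespace evs = aidl::android::hardware::automotive::evs;

    evs::EvsResult toEvsResult(const ndk::ScopedAStatus& status) {
        if (status.isOk()) {
            return evs::EvsResult::OK;
        }
        if (status.getExceptionCode() == EX_SERVICE_SPECIFIC) {
            // The service-specific error carries the EvsResult value thrown by the HAL.
            return static_cast<evs::EvsResult>(status.getServiceSpecificError());
        }
        // Transport-level failures (dead binder, etc.) have no EvsResult equivalent;
        // mapping them to UNDERLYING_SERVICE_ERROR is an assumption of this sketch.
        return evs::EvsResult::UNDERLYING_SERVICE_ERROR;
    }
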
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsCamera.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsCamera.aidl
new file mode 100644
index 0000000..080dd75
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsCamera.aidl
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.BufferDesc;
+import android.hardware.automotive.evs.CameraDesc;
+import android.hardware.automotive.evs.CameraParam;
+import android.hardware.automotive.evs.IEvsCameraStream;
+import android.hardware.automotive.evs.IEvsDisplay;
+import android.hardware.automotive.evs.ParameterRange;
+
+/**
+ * Represents a single camera and is the primary interface for capturing images.
+ */
+@VintfStability
+interface IEvsCamera {
+ /**
+ * Returns frames that were delivered to the IEvsCameraStream.
+ *
+ * When done consuming a frame delivered to the IEvsCameraStream
+ * interface, it must be returned to the IEvsCamera for reuse.
+ * A small, finite number of buffers are available (possibly as small
+ * as one), and if the supply is exhausted, no further frames may be
+ * delivered until a buffer is returned.
+ *
+ * @param in buffer Buffers to be returned.
+ */
+ void doneWithFrame(in BufferDesc[] buffer);
+
+ /**
+ * Forcibly sets this client as the primary client.
+ *
+ * The client that owns the display has a higher priority and can take over
+ * the primary client role from clients that do not own the display.
+ *
+ * @param in display IEvsDisplay handle. If a given display is in either
+ * NOT_VISIBLE, VISIBLE_ON_NEXT_FRAME, or VISIBLE state, the
+ * calling client is considered the high-priority client
+ * and is therefore allowed to take over the primary client role
+ * from the existing primary client.
+ * @throws EvsResult::INVALID_ARG if a given display handle is null or in an invalid state.
+ */
+ void forcePrimaryClient(in IEvsDisplay display);
+
+ /**
+ * Returns the description of this camera.
+ *
+ * @return The description of this camera. This must be the same value as
+ * reported by IEvsEnumerator::getCameraList().
+ */
+ CameraDesc getCameraInfo();
+
+ /**
+ * Request driver specific information from the HAL implementation.
+ *
+ * The values allowed for opaqueIdentifier are driver specific,
+ * but no value passed in may crash the driver.
+ *
+ * @param in opaqueIdentifier A unique identifier of the information to
+ * request.
+ * @return Requested information. A zero-size vector is returned if the driver does
+ * not recognize a given identifier.
+ * @throws EvsResult::INVALID_ARG for any unrecognized opaqueIdentifier.
+ */
+ byte[] getExtendedInfo(in int opaqueIdentifier);
+
+ /**
+ * Retrieves the values of a given camera parameter. The driver must report
+ * EvsResult::INVALID_ARG if a requested parameter is not supported.
+ *
+ * @param in id The identifier of camera parameter, CameraParam enum.
+ * @return Values of the requested camera parameter; the number of values equals
+ * the number of backing camera devices.
+ * @throws EvsResult::INVALID_ARG for any unrecognized parameter.
+ * EvsResult::UNDERLYING_SERVICE_ERROR for any other failures.
+ */
+ int[] getIntParameter(in CameraParam id);
+
+ /**
+ * Requests a valid value range of a camera parameter
+ *
+ * @param in id The identifier of camera parameter, CameraParam enum.
+ * @return ParameterRange of a requested CameraParam
+ */
+ ParameterRange getIntParameterRange(in CameraParam id);
+
+ /**
+ * Retrieves a list of parameters this camera supports.
+ *
+ * @return A list of CameraParam that this camera supports.
+ */
+ CameraParam[] getParameterList();
+
+ /**
+ * Returns the description of the physical camera device that backs this
+ * logical camera.
+ *
+ * If the requested device does not exist or does not back this logical device,
+ * this method returns a null camera descriptor. If this is called on a
+ * physical camera device and the given device ID matches, this method behaves
+ * the same as getCameraInfo(); otherwise, it returns a null camera
+ * descriptor.
+ *
+ * @param in deviceId Physical camera device identifier string.
+ * @return The description of a member physical camera device.
+ * This must be the same value as reported by IEvsEnumerator::getCameraList().
+ */
+ CameraDesc getPhysicalCameraInfo(in String deviceId);
+
+ /**
+ * Import external buffers to capture frames
+ *
+ * This API must be called with a physical camera device identifier.
+ *
+ * @param in buffers A list of buffers allocated by the caller. EvsCamera
+ * will use these buffers to capture frames, in addition to
+ * other buffers already in its buffer pool.
+ * @return The change in the buffer pool size after importing the given buffers.
+ */
+ int importExternalBuffers(in BufferDesc[] buffers);
+
+ /**
+ * Requests to pause EVS camera stream events.
+ *
+ * Like stopVideoStream(), events may continue to arrive for some time
+ * after this call returns. Delivered frame buffers must be returned.
+ */
+ void pauseVideoStream();
+
+ /**
+ * Requests to resume EVS camera stream.
+ */
+ void resumeVideoStream();
+
+ /**
+ * Send a driver specific value to the HAL implementation.
+ *
+ * This extension is provided to facilitate car specific
+ * extensions, but no HAL implementation may require this call
+ * in order to function in a default state.
+ * INVALID_ARG is returned if the opaqueValue is not meaningful to
+ * the driver implementation.
+ *
+ * @param in opaqueIdentifier A unique identifier of the information to
+ * program.
+ * in opaqueValue A value to program.
+ * @throws EvsResult::INVALID_ARG if this call fails to set a parameter.
+ */
+ void setExtendedInfo(in int opaqueIdentifier, in byte[] opaqueValue);
+
+ /**
+ * Requests to set a camera parameter.
+ *
+ * Only a request from the primary client will be processed successfully.
+ * When this method is called on a logical camera device, it will be forwarded
+ * to each physical device and, if it fails to program any physical device,
+ * it will return an error code with the same number of effective values as
+ * the number of backing camera devices.
+ *
+ * @param in id The identifier of camera parameter, CameraParam enum.
+ * @param in value A desired parameter value.
+ * @return Programmed parameter values. This may differ from what the client
+ * gives if, for example, the driver does not support a target parameter.
+ * @throws EvsResult::INVALID_ARG if either the request is not made by the primary
+ * client, or a requested parameter is not supported.
+ * EvsResult::UNDERLYING_SERVICE_ERROR if it fails to program a value by any
+ * other reason.
+ */
+ int[] setIntParameter(in CameraParam id, in int value);
+
+ /**
+ * Requests to be the primary client.
+ *
+ * When multiple clients subscribe to a single camera hardware and one of
+ * them adjusts a camera parameter such as the contrast, it may disturb
+ * other clients' operations. Therefore, the client must call this method
+ * to be a primary client. Once it becomes a primary client, it will be able to
+ * change camera parameters until either it dies or explicitly gives up the
+ * role.
+ *
+ * @throws EvsResult::OWNERSHIP_LOST if another client already holds the primary client role.
+ */
+ void setPrimaryClient();
+
+ /**
+ * Specifies the depth of the buffer chain the camera is asked to support.
+ *
+ * Up to this many frames may be held concurrently by the client of IEvsCamera.
+ * If this many frames have been delivered to the receiver without being returned
+ * by doneWithFrame, the stream must skip frames until a buffer is returned for reuse.
+ * It is legal for this call to come at any time, even while streams are already running,
+ * in which case buffers should be added or removed from the chain as appropriate.
+ * If no call is made to this entry point, the IEvsCamera must support at least one
+ * frame by default. More is acceptable.
+ *
+ * @param in bufferCount Number of buffers the client of IEvsCamera may hold concurrently.
+ * @throws EvsResult::BUFFER_NOT_AVAILABLE if the client cannot increase the max frames.
+ * EvsResult::INVALID_ARG if the client cannot decrease the max frames.
+ * EvsResult::OWNERSHIP_LOST if we lost an ownership of a target camera.
+ */
+ void setMaxFramesInFlight(in int bufferCount);
+
+ /**
+ * Request to start EVS camera stream from this camera.
+ *
+ * The IEvsCameraStream must begin receiving calls with various events
+ * including new image frame ready until stopVideoStream() is called.
+ *
+ * @param in receiver IEvsCameraStream implementation.
+ * @throws EvsResult::OWNERSHIP_LOST if we lost an ownership of a target camera.
+ * EvsResult::STREAM_ALREADY_RUNNING if a video stream has been started already.
+ * EvsResult::BUFFER_NOT_AVAILABLE if it fails to secure a minimum number of
+ * buffers to run a video stream.
+ * EvsResult::UNDERLYING_SERVICE_ERROR for all other failures.
+ */
+ void startVideoStream(in IEvsCameraStream receiver);
+
+ /**
+ * Stop the delivery of EVS camera frames.
+ *
+ * Because delivery is asynchronous, frames may continue to arrive for
+ * some time after this call returns. Each must be returned until the
+ * closure of the stream is signaled to the IEvsCameraStream.
+ * This function cannot fail and is simply ignored if the stream isn't running.
+ */
+ void stopVideoStream();
+
+ /**
+ * Retires from the primary client role.
+ *
+ * @throws EvsResult::INVALID_ARG if the caller client is not a primary client.
+ */
+ void unsetPrimaryClient();
+}
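
To make the primary-client and parameter-range flow above concrete, a possible client-side sequence (not part of this patch) is sketched below; setBrightness and its clamping policy are illustrative assumptions.

    #include <aidl/android/hardware/automotive/evs/CameraParam.h>
    #include <aidl/android/hardware/automotive/evs/IEvsCamera.h>
    #include <aidl/android/hardware/automotive/evs/ParameterRange.h>

    #include <algorithm>
    #include <memory>
    #include <vector>

    namespace evs = aidl::android::hardware::automotive::evs;

    // Hypothetical helper: become the primary client, clamp the request into the
    // advertised range, program the value, then give the primary role back.
    bool setBrightness(const std::shared_ptr<evs::IEvsCamera>& camera, int32_t requested) {
        if (!camera->setPrimaryClient().isOk()) {
            return false;  // another client already holds the primary role
        }

        evs::ParameterRange range;
        bool ok = camera->getIntParameterRange(evs::CameraParam::BRIGHTNESS, &range).isOk();
        if (ok) {
            const int32_t value = std::clamp(requested, range.min, range.max);
            std::vector<int32_t> effective;  // one value per backing camera device
            ok = camera->setIntParameter(evs::CameraParam::BRIGHTNESS, value, &effective).isOk();
        }

        camera->unsetPrimaryClient();
        return ok;
    }
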
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsCameraStream.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsCameraStream.aidl
new file mode 100644
index 0000000..2c2b44c
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsCameraStream.aidl
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.BufferDesc;
+import android.hardware.automotive.evs.EvsEventDesc;
+import android.hardware.graphics.common.HardwareBuffer;
+
+/**
+ * Implemented on client side to receive asynchronous streaming event deliveries.
+ */
+@VintfStability
+oneway interface IEvsCameraStream {
+ /**
+ * Receives calls from the HAL each time video frames are ready for inspection.
+ * Buffer handles received by this method must be returned via calls to
+ * IEvsCamera::doneWithFrame(). When the video stream is stopped via a call
+ * to IEvsCamera::stopVideoStream(), this callback may continue to happen for
+ * some time as the pipeline drains. Each frame must still be returned.
+ * When the last frame in the stream has been delivered, STREAM_STOPPED
+ * event must be delivered. No further frame deliveries may happen
+ * thereafter.
+ *
+ * A camera device will deliver the same number of frames as the number of
+ * backing physical camera devices; that is, a physical camera device
+ * always sends a single frame, and a logical camera device sends as many
+ * frames as it has backing physical camera devices.
+ *
+ * @param in buffer Buffer descriptors of delivered image frames.
+ */
+ void deliverFrame(in BufferDesc[] buffer);
+
+ /**
+ * Receives calls from the HAL each time an event happens.
+ *
+ * @param in event EVS event with possible event information.
+ */
+ void notify(in EvsEventDesc event);
+}
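
A minimal client-side receiver (not part of this patch) might look like the following, assuming the NDK backend's generated BnEvsCameraStream base class; FrameHandler is a hypothetical name, and a real consumer would process the frames before returning them.

    #include <aidl/android/hardware/automotive/evs/BnEvsCameraStream.h>
    #include <aidl/android/hardware/automotive/evs/IEvsCamera.h>

    #include <memory>
    #include <utility>
    #include <vector>

    namespace evs = aidl::android::hardware::automotive::evs;

    class FrameHandler : public evs::BnEvsCameraStream {
      public:
        explicit FrameHandler(std::shared_ptr<evs::IEvsCamera> camera)
            : mCamera(std::move(camera)) {}

        ndk::ScopedAStatus deliverFrame(const std::vector<evs::BufferDesc>& buffers) override {
            // Inspect or copy the frames here, then give the buffers back for reuse.
            mCamera->doneWithFrame(buffers);
            return ndk::ScopedAStatus::ok();
        }

        ndk::ScopedAStatus notify(const evs::EvsEventDesc& event) override {
            if (event.aType == evs::EvsEventType::STREAM_STOPPED) {
                // Last frame has been delivered; no further callbacks are expected.
            }
            return ndk::ScopedAStatus::ok();
        }

      private:
        std::shared_ptr<evs::IEvsCamera> mCamera;
    };

    // Typical usage (sketch):
    //   auto handler = ndk::SharedRefBase::make<FrameHandler>(camera);
    //   camera->setMaxFramesInFlight(2);   // optional; at least one frame is guaranteed
    //   camera->startVideoStream(handler);
    //   ...
    //   camera->stopVideoStream();
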
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsDisplay.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsDisplay.aidl
new file mode 100644
index 0000000..8d57014
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsDisplay.aidl
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.BufferDesc;
+import android.hardware.automotive.evs.DisplayDesc;
+import android.hardware.automotive.evs.DisplayState;
+
+/**
+ * Represents a single display.
+ */
+@VintfStability
+interface IEvsDisplay {
+ /**
+ * Returns the description of this display.
+ *
+ * @return The information of this display including id, current mode, current state,
+ * and additional vendor-specific information.
+ * @throws EvsResult::UNDERLYING_SERVICE_ERROR if it fails to read a display information.
+ */
+ DisplayDesc getDisplayInfo();
+
+ /**
+ * This call requests the current state of the display
+ *
+ * The HAL implementation should report the actual current state, which might
+ * transiently differ from the most recently requested state. Note, however, that
+ * the logic responsible for changing display states should generally live above
+ * the device layer, making it undesirable for the HAL implementation to spontaneously
+ * change display states.
+ *
+ * @return Current DisplayState of this Display.
+ */
+ DisplayState getDisplayState();
+
+ /**
+ * This call returns a handle to a frame buffer associated with the display.
+ *
+ * @return A handle to a frame buffer. The returned buffer may be locked and
+ * written to by software and/or GL. This buffer must be returned via
+ * a call to returnTargetBufferForDisplay() even if the display is no
+ * longer visible.
+ * @throws EvsResult::OWNERSHIP_LOST if a display is in DisplayState::DEAD.
+ * EvsResult::BUFFER_NOT_AVAILABLE if no buffer is available.
+ * EvsResult::UNDERLYING_SERVICE_ERROR for any other failures.
+ */
+ BufferDesc getTargetBuffer();
+
+ /**
+ * This call tells the display that the buffer is ready for display.
+ *
+ * The buffer is no longer valid for use by the client after this call.
+ * There is no maximum time the caller may hold onto the buffer before making this
+ * call. The buffer may be returned at any time and in any DisplayState, but all
+ * buffers are expected to be returned before the IEvsDisplay interface is destroyed.
+ *
+ * @param in buffer A buffer handle to the frame that is ready for display.
+ * @throws EvsResult::INVALID_ARG if a given buffer is unknown or invalid.
+ * EvsResult::OWNERSHIP_LOST if a display is in DisplayState::DEAD.
+ * EvsResult::UNDERLYING_SERVICE_ERROR for any other failures.
+ */
+ void returnTargetBufferForDisplay(in BufferDesc buffer);
+
+ /**
+ * Clients may set the display state to express their desired state.
+ *
+ * The HAL implementation must gracefully accept a request for any state while in
+ * any other state, although the response may be to defer or ignore the request. The display
+ * is defined to start in the NOT_VISIBLE state upon initialization. The client is
+ * then expected to request the VISIBLE_ON_NEXT_FRAME state, and then begin providing
+ * video. When the display is no longer required, the client is expected to request
+ * the NOT_VISIBLE state after passing the last video frame.
+ * Returns INVALID_ARG if the requested state is not a recognized value.
+ *
+ * @param in state Desired new DisplayState.
+ * @throws EvsResult::INVALID_ARG if a given state is invalid.
+ * EvsResult::OWNERSHIP_LOST if a display is in DisplayState::DEAD.
+ */
+ void setDisplayState(in DisplayState state);
+}
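
A possible client-side sequence (not part of this patch) that follows the lifecycle described above (request visibility, obtain a target buffer, return it after drawing, then hide the display) is sketched below; presentOneFrame is an illustrative helper.

    #include <aidl/android/hardware/automotive/evs/BufferDesc.h>
    #include <aidl/android/hardware/automotive/evs/DisplayState.h>
    #include <aidl/android/hardware/automotive/evs/IEvsDisplay.h>

    #include <memory>

    namespace evs = aidl::android::hardware::automotive::evs;

    bool presentOneFrame(const std::shared_ptr<evs::IEvsDisplay>& display) {
        display->setDisplayState(evs::DisplayState::VISIBLE_ON_NEXT_FRAME);

        evs::BufferDesc target;
        if (!display->getTargetBuffer(&target).isOk()) {
            return false;  // e.g. BUFFER_NOT_AVAILABLE or OWNERSHIP_LOST
        }

        // ... lock target.buffer and render into it with the CPU or GL ...

        const bool ok = display->returnTargetBufferForDisplay(target).isOk();
        display->setDisplayState(evs::DisplayState::NOT_VISIBLE);
        return ok;
    }
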
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsEnumerator.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsEnumerator.aidl
new file mode 100644
index 0000000..8e380e0
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsEnumerator.aidl
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.CameraDesc;
+import android.hardware.automotive.evs.DisplayState;
+import android.hardware.automotive.evs.IEvsCamera;
+import android.hardware.automotive.evs.IEvsDisplay;
+import android.hardware.automotive.evs.IEvsEnumeratorStatusCallback;
+import android.hardware.automotive.evs.IEvsUltrasonicsArray;
+import android.hardware.automotive.evs.Stream;
+import android.hardware.automotive.evs.UltrasonicsArrayDesc;
+
+/**
+ * Provides the mechanism for EVS camera and ultrasonics array discovery
+ */
+@VintfStability
+interface IEvsEnumerator {
+ /**
+ * Return the specified IEvsCamera interface as no longer in use
+ *
+ * When the IEvsCamera object is no longer required, it must be released.
+ * NOTE: Video streaming must be cleanly stopped before making this call.
+ *
+ * @param in carCamera EvsCamera object to be closed.
+ * @throws EvsResult::INVALID_ARG if a given camera object is invalid.
+ */
+ void closeCamera(in IEvsCamera carCamera);
+
+ /**
+ * Return the specified IEvsDisplay interface as no longer in use
+ *
+ * When the IEvsDisplay object is no longer required, it must be released.
+ * NOTE: All buffers must have been returned to the display before making this call.
+ *
+ * @param in display EvsDisplay object to be closed.
+ */
+ void closeDisplay(in IEvsDisplay display);
+
+ /**
+ * Return the specified IEvsUltrasonicsArray interface as no longer in use
+ *
+ * When the IEvsUltrasonicsArray object is no longer required, it must be released.
+ * NOTE: Data streaming must be cleanly stopped before making this call.
+ *
+ * @param in evsUltrasonicsArray EvsUltrasonics array object to be closed.
+ */
+ void closeUltrasonicsArray(in IEvsUltrasonicsArray evsUltrasonicsArray);
+
+ /**
+ * Returns a list of all EVS cameras available to the system
+ *
+ * @return A list of cameras available for the EVS service.
+ * @throws EvsResult::PERMISSION_DENIED if the process is not permitted to enumerate
+ * camera devices.
+ */
+ CameraDesc[] getCameraList();
+
+ /**
+ * Returns a list of all EVS displays available to the system
+ *
+ * @return Identifiers of available displays.
+ */
+ byte[] getDisplayIdList();
+
+ /**
+ * This call requests the current state of the display
+ *
+ * If there is no open display, this returns DisplayState::NOT_OPEN; otherwise, it returns
+ * the actual state of the active display. This call is replicated on the IEvsEnumerator
+ * interface in order to allow secondary clients to monitor the state of the EVS display
+ * without acquiring exclusive ownership of the display.
+ *
+ * @return Current DisplayState of this Display.
+ * @throws EvsResult::OWNERSHIP_LOST if current display is inactive
+ * EvsResult::PERMISSION_DENIED if the process is not permitted to do this operation.
+ */
+ DisplayState getDisplayState();
+
+ /**
+ * Return a list of the stream configurations a target camera device supports
+ *
+ * @param in description A target camera descriptor
+ * @return A list of stream configurations supported by a given camera device
+ */
+ Stream[] getStreamList(in CameraDesc description);
+
+ /**
+ * Returns a list of all ultrasonics arrays available to the system.
+ * Will return an empty vector if ultrasonics arrays are not supported.
+ *
+ * @return A list of ultrasonics available for EVS service.
+ */
+ UltrasonicsArrayDesc[] getUltrasonicsArrayList();
+
+ /**
+ * Tells whether this is EvsManager or HAL implementation.
+ *
+ * @return False for EvsManager implementations and true for all others.
+ */
+ boolean isHardware();
+
+ /**
+ * Gets the IEvsCamera associated with a cameraId from a CameraDesc
+ *
+ * Given a camera's unique cameraId from CameraDesc, returns the
+ * IEvsCamera interface associated with the specified camera. When
+ * done using the camera, the caller may release it by calling closeCamera().
+ *
+ * @param in cameraId A unique identifier of the camera.
+ * @param in streamCfg A stream configuration the client wants to use.
+ * @return EvsCamera object associated with a given cameraId.
+ * The returned object will be null if a camera device does not support a
+ * given stream configuration or is already configured differently by
+ * another client.
+ * @throws EvsResult::PERMISSION_DENIED if the process is not permitted to use camera
+ * devices.
+ * EvsResult::INVALID_ARG if it fails to open a camera with a given id.
+ */
+ IEvsCamera openCamera(in String cameraId, in Stream streamCfg);
+
+ /**
+ * Get exclusive access to IEvsDisplay for the system
+ *
+ * There can be more than one EVS display object in the system, and this function
+ * requests access to the display identified by a given ID. If the target EVS display
+ * is not available or is already in use, the old instance shall be closed and exclusive
+ * access shall be given to the new caller.
+ * When done using the display, the caller may release it by calling closeDisplay().
+ *
+ * @param in id Target display identifier.
+ * @return EvsDisplay object to be used.
+ * @throws EvsResult::INVALID_ARG if no display with a given id exists
+ */
+ IEvsDisplay openDisplay(in byte id);
+
+ /**
+ * Gets the IEvsUltrasonicsArray associated with an ultrasonicsArrayId from an
+ * UltrasonicsArrayDesc
+ *
+ * @param in ultrasonicsArrayId A unique identifier of the ultrasonic array.
+ * @return IEvsUltrasonicsArray object associated with a given ultrasonicsArrayId.
+ */
+ IEvsUltrasonicsArray openUltrasonicsArray(in String ultrasonicsArrayId);
+
+ /**
+ * Registers a callback to listen to devices' status changes
+ *
+ * @param in callback IEvsEnumeratorStatusCallback implementation
+ */
+ void registerStatusCallback(in IEvsEnumeratorStatusCallback callback);
+}
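
A typical discovery sequence (not part of this patch) against this interface could look like the sketch below; the service instance name and the choice of the first camera and first stream configuration are assumptions for illustration.

    #include <aidl/android/hardware/automotive/evs/IEvsEnumerator.h>
    #include <android/binder_manager.h>

    #include <memory>
    #include <string>
    #include <vector>

    namespace evs = aidl::android::hardware::automotive::evs;

    std::shared_ptr<evs::IEvsCamera> openFirstCamera() {
        // Instance name is an assumption; the actual name depends on the device manifest.
        auto binder = ndk::SpAIBinder(AServiceManager_waitForService(
                "android.hardware.automotive.evs.IEvsEnumerator/default"));
        auto enumerator = evs::IEvsEnumerator::fromBinder(binder);
        if (!enumerator) return nullptr;

        std::vector<evs::CameraDesc> cameras;
        if (!enumerator->getCameraList(&cameras).isOk() || cameras.empty()) return nullptr;

        std::vector<evs::Stream> configs;
        if (!enumerator->getStreamList(cameras[0], &configs).isOk() || configs.empty()) {
            return nullptr;
        }

        std::shared_ptr<evs::IEvsCamera> camera;
        enumerator->openCamera(cameras[0].id, configs[0], &camera);
        return camera;  // may be null if the configuration was rejected
    }
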
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsEnumeratorStatusCallback.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsEnumeratorStatusCallback.aidl
new file mode 100644
index 0000000..26ccf72
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsEnumeratorStatusCallback.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.DeviceStatus;
+
+/**
+ * Implemented on client side to receive asynchronous notifications from
+ * IEvsEnumerator.
+ */
+@VintfStability
+oneway interface IEvsEnumeratorStatusCallback {
+ /**
+ * Receives calls from the HAL each time a status of camera devices is
+ * changed.
+ *
+ * @param in status A list of newly updated device status
+ */
+ void deviceStatusChanged(in DeviceStatus[] status);
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.aidl
new file mode 100644
index 0000000..40de313
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.aidl
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.IEvsUltrasonicsArrayStream;
+import android.hardware.automotive.evs.UltrasonicsArrayDesc;
+import android.hardware.automotive.evs.UltrasonicsDataFrameDesc;
+
+/**
+ * HAL interface for ultrasonics sensor array.
+ */
+@VintfStability
+interface IEvsUltrasonicsArray {
+ /**
+ * Notifies that the UltrasonicsDataFrameDesc received from
+ * IEvsUltrasonicsArrayStream has been consumed
+ *
+ * @param in dataFrameDesc Ultrasonics data descriptor
+ */
+ void doneWithDataFrame(in UltrasonicsDataFrameDesc dataFrameDesc);
+
+ /**
+ * Returns the ultrasonic sensor array information
+ *
+ * @return The description of this ultrasonic array. This must be the same
+ * value as reported by IEvsEnumerator::getUltrasonicsArrayList().
+ */
+ UltrasonicsArrayDesc getUltrasonicArrayInfo();
+
+ /**
+ * Specifies the depth of the buffer chain the ultrasonics sensor array is
+ * asked to support
+ *
+ * Up to this many data frames may be held concurrently by the client of IEvsUltrasonicsArray.
+ * If this many frames have been delivered to the receiver without being returned
+ * by doneWithFrame, the stream must skip frames until a buffer is returned for reuse.
+ * It is legal for this call to come at any time, even while streams are already running,
+ * in which case buffers should be added or removed from the chain as appropriate.
+ * If no call is made to this entry point, the IEvsUltrasonicsArray must support at least one
+ * data frame by default. More is acceptable.
+ *
+ * @param in bufferCount Number of buffers the client of IEvsUltrasonicsArray may hold
+ * concurrently.
+ * @throws EvsResult::INVALID_ARG on invalid bufferCount.
+ */
+ void setMaxFramesInFlight(in int bufferCount);
+
+ /**
+ * Requests to start the stream
+ *
+ * @param in stream Implementation of IEvsUltrasonicsArrayStream.
+ * @throws EvsResult::STREAM_ALREADY_RUNNING if stream is already running
+ */
+ void startStream(in IEvsUltrasonicsArrayStream stream);
+
+ /**
+ * Requests to stop the delivery of the ultrasonic array data frames
+ *
+ * Because delivery is asynchronous, frames may continue to arrive for
+ * some time after this call returns. Each must be returned until the
+ * closure of the stream is signaled to the IEvsUltrasonicsArrayStream.
+ * This function cannot fail and is ignored if the stream isn't running.
+ */
+ void stopStream();
+}
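Taken together, these methods form a simple configure/start/stop lifecycle. The sketch below is a hypothetical client flow against the NDK backend; the helper name, the buffer count, and the assumption that the array and stream objects were obtained elsewhere (e.g. from IEvsEnumerator::openUltrasonicsArray() and a stream implementation such as the VTS FrameHandlerUltrasonics further below) are all illustrative.

    #include <aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.h>
    #include <aidl/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.h>
    #include <aidl/android/hardware/automotive/evs/UltrasonicsArrayDesc.h>

    #include <android-base/logging.h>

    #include <memory>

    namespace evs = ::aidl::android::hardware::automotive::evs;

    void runUltrasonicsSession(const std::shared_ptr<evs::IEvsUltrasonicsArray>& array,
                               const std::shared_ptr<evs::IEvsUltrasonicsArrayStream>& listener) {
        evs::UltrasonicsArrayDesc desc;
        if (!array->getUltrasonicArrayInfo(&desc).isOk()) {
            return;  // Could not query the array description.
        }

        // Allow this client to hold up to two data frames before returning them.
        if (!array->setMaxFramesInFlight(2).isOk()) {
            LOG(WARNING) << "setMaxFramesInFlight() was rejected; keeping the default depth.";
        }

        if (array->startStream(listener).isOk()) {
            // Frames now arrive asynchronously via IEvsUltrasonicsArrayStream::deliverDataFrame();
            // each one must eventually be handed back with doneWithDataFrame().
            array->stopStream();  // Frames may keep arriving until STREAM_STOPPED is delivered.
        }
    }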
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.aidl
new file mode 100644
index 0000000..bc31a6b
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/IEvsUltrasonicsArrayStream.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.EvsEventDesc;
+import android.hardware.automotive.evs.UltrasonicsDataFrameDesc;
+
+/**
+ * Implemented on client side to receive asynchronous ultrasonic data
+ * deliveries.
+ */
+@VintfStability
+interface IEvsUltrasonicsArrayStream {
+ /**
+ * Receives calls from the HAL each time a data frame is ready
+ *
+ * @param in dataFrameDesc Ultrasonic array data frame descriptor
+ */
+ oneway void deliverDataFrame(in UltrasonicsDataFrameDesc dataFrameDesc);
+
+ /**
+ * Receives calls from the HAL each time an event happens
+ *
+ * @param in event An EVS event with optional payload information
+ */
+ oneway void notify(in EvsEventDesc event);
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/ParameterRange.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/ParameterRange.aidl
new file mode 100644
index 0000000..b08fcbd
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/ParameterRange.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Represents a valid range of a CameraParam
+ */
+@VintfStability
+parcelable ParameterRange {
+ /**
+ * Lower bound of a valid value range
+ */
+ int min;
+ /**
+ * Upper bound of a valid value range
+ */
+ int max;
+ /**
+ * The unit increment between valid values
+ */
+ int step;
+}
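To illustrate how a client might honor these bounds, the hypothetical helper below snaps a requested value to the nearest legal step inside [min, max]; it is a sketch only and not part of the HAL.

    #include <algorithm>
    #include <cstdint>

    // Hypothetical helper: clamp a requested camera-parameter value to [min, max] and
    // round it to the nearest value that is an integer number of steps above min.
    int32_t clampToRange(int32_t requested, int32_t min, int32_t max, int32_t step) {
        const int32_t clamped = std::clamp(requested, min, max);
        if (step <= 0) {
            return clamped;  // Degenerate descriptor; nothing to snap to.
        }
        const int32_t steps = (clamped - min + step / 2) / step;  // Round to the nearest step.
        return std::min(min + steps * step, max);
    }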
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/Rotation.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/Rotation.aidl
new file mode 100644
index 0000000..dede39e
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/Rotation.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Rotation:
+ *
+ * The required counterclockwise rotation of EVS camera stream and display.
+ */
+@VintfStability
+@Backing(type="int")
+enum Rotation {
+ /** No rotation */
+ ROTATION_0 = 0,
+ /** Rotate by 90 degrees counterclockwise */
+ ROTATION_90 = 1,
+ /** Rotate by 180 degrees counterclockwise */
+ ROTATION_180 = 2,
+ /** Rotate by 270 degrees counterclockwise */
+ ROTATION_270 = 3
+
+}
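Because the enum values count quarter turns, a client can recover the counterclockwise angle with a one-line helper; a hypothetical sketch against the NDK backend:

    #include <aidl/android/hardware/automotive/evs/Rotation.h>

    namespace evs = ::aidl::android::hardware::automotive::evs;

    // Hypothetical helper: ROTATION_0..ROTATION_270 map to 0, 90, 180 and 270 degrees.
    int toDegrees(evs::Rotation rotation) {
        return static_cast<int>(rotation) * 90;
    }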
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/RotationQuaternion.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/RotationQuaternion.aidl
new file mode 100644
index 0000000..b80343b
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/RotationQuaternion.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Structure for rotation expressed as quaternions.
+ * Convention used: Unit quaternion with Hamilton convention.
+ */
+@VintfStability
+parcelable RotationQuaternion {
+ float x;
+ float y;
+ float z;
+ float w;
+}
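For illustration, the hypothetical helper below builds such a unit quaternion from an axis-angle rotation, with the fields ordered x, y, z, w as in the parcelable above; the worked +90 degree yaw matches the SensorPose example that follows.

    #include <cmath>

    // Hypothetical standalone helper, not part of the HAL: unit quaternion (Hamilton
    // convention) for a counterclockwise rotation of angleRad radians around the unit
    // axis (ax, ay, az).
    struct Quaternion { float x, y, z, w; };

    Quaternion fromAxisAngle(float ax, float ay, float az, float angleRad) {
        const float s = std::sin(angleRad / 2.0f);
        return {ax * s, ay * s, az * s, std::cos(angleRad / 2.0f)};
    }

    // Example: a +90 degree rotation around Z is fromAxisAngle(0.f, 0.f, 1.f, M_PI_2),
    // which yields x = y = 0 and z = w = 0.7071.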
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/SensorPose.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/SensorPose.aidl
new file mode 100644
index 0000000..26c3339
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/SensorPose.aidl
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.RotationQuaternion;
+import android.hardware.automotive.evs.Translation;
+
+/**
+ * Provides the orientation and location of a car sensor relative to the android automotive
+ * coordinate system:
+ * https://source.android.com/devices/sensors/sensor-types#auto_axes
+ * The sensor pose defines the transformation to be applied to the android automotive axes to
+ * obtain the sensor local axes.
+ * The pose consists of a rotation (specified as a quaternion) and a translation
+ * (a vector with x, y, z components).
+ * This rotation and translation applied to the sensor data in the sensor's local coordinate
+ * system transform the data to the automotive coordinate system.
+ * i.e. loc = ( Rot * Psensor ) + Trans
+ * Here loc is a point in automotive coordinate system and Psensor is a point in the sensor's
+ * coordinate system.
+ * Example:
+ * For a sensor on the front bumper and on the left corner of the car with its X axis pointing to
+ * the front, the sensor is located at (-2, 4, 0) meters w.r.t android automotive axes and the
+ * sensor local axes has a rotation of 90 degrees counter-clockwise w.r.t android automotive axes
+ * when viewing the car from top on the +Z axis side:
+ *
+ * ↑X sensor
+ * Y←∘______
+ * | | front
+ * | car |
+ * | ↑Y |
+ * | ∘→X | rear
+ * |______|
+ *
+ * For this example the rotation and translation will be:
+ * Rotation = +90 degrees around the Z axis, i.e. the unit quaternion
+ * (w, x, y, z) = (0.7071, 0, 0, 0.7071).
+ * Translation = (-2, 4, 0) in meters = (-2000, 4000, 0) in milli-meters.
+ * Note: Every sensor type must specify its own pose.
+ */
+@VintfStability
+parcelable SensorPose {
+ /**
+ * Rotation part of the sensor pose, expressed as a unit quaternion.
+ */
+ RotationQuaternion rotation;
+ /**
+ * Translation part of the sensor pose, in (x, y, z) format with milli-meter units.
+ */
+ Translation translation;
+}
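The formula loc = (Rot * Psensor) + Trans above can be applied with a few lines of vector math. The sketch below is a self-contained, hypothetical helper (Hamilton convention, millimeter units) and is not part of the HAL.

    // Hypothetical standalone helpers, not part of the HAL.
    struct Quat { float w, x, y, z; };  // Unit quaternion, Hamilton convention.
    struct Vec3 { float x, y, z; };     // Millimeters.

    // Rotate v by the unit quaternion q: v' = v + 2 * u x (u x v + w * v), with u = (x, y, z).
    Vec3 rotate(const Quat& q, const Vec3& v) {
        const Vec3 u{q.x, q.y, q.z};
        const Vec3 c{u.y * v.z - u.z * v.y + q.w * v.x,
                     u.z * v.x - u.x * v.z + q.w * v.y,
                     u.x * v.y - u.y * v.x + q.w * v.z};
        return {v.x + 2 * (u.y * c.z - u.z * c.y),
                v.y + 2 * (u.z * c.x - u.x * c.z),
                v.z + 2 * (u.x * c.y - u.y * c.x)};
    }

    // loc = (Rot * Psensor) + Trans: map a point from the sensor's local frame into
    // the android automotive coordinate system.
    Vec3 sensorToCar(const Quat& rot, const Vec3& trans, const Vec3& pSensor) {
        const Vec3 r = rotate(rot, pSensor);
        return {r.x + trans.x, r.y + trans.y, r.z + trans.z};
    }

    // With the documented example pose, rot = {0.7071f, 0.f, 0.f, 0.7071f} and
    // trans = {-2000.f, 4000.f, 0.f}, the sensor origin {0, 0, 0} maps to {-2000, 4000, 0}.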
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/Stream.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/Stream.aidl
new file mode 100644
index 0000000..ae5c7f0
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/Stream.aidl
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.Rotation;
+import android.hardware.automotive.evs.StreamType;
+import android.hardware.graphics.common.BufferUsage;
+import android.hardware.graphics.common.PixelFormat;
+
+/**
+ * Stream:
+ *
+ * Structure that describes an EVS camera stream
+ */
+@VintfStability
+parcelable Stream {
+ /**
+ * Stream ID - a non-negative integer identifier for a stream.
+ *
+ * The identical stream ID must reference the same stream, with the same
+ * width/height/format, across consecutive calls to configureStreams.
+ *
+ * If a previously-used stream ID is not used in a new call to
+ * configureStreams, then that stream is no longer active. Such a stream ID
+ * may be reused in a future configureStreams with a new
+ * width/height/format.
+ *
+ */
+ int id;
+ /**
+ * The type of the stream (input vs output, etc).
+ */
+ StreamType streamType;
+ /**
+ * The width in pixels of the buffers in this stream.
+ */
+ int width;
+ /**
+ * The height in pixels of the buffers in this stream.
+ */
+ int height;
+ /**
+ * The frame rate of this stream in frames-per-second.
+ */
+ int framerate;
+ /**
+ * The pixel format for the buffers in this stream.
+ */
+ PixelFormat format;
+ /**
+ * The gralloc usage flags for this stream, as needed by the consumer of
+ * the stream.
+ */
+ BufferUsage usage;
+ /**
+ * The required output rotation of the stream.
+ *
+ * This must be inspected by the HAL along with the stream width and height.
+ */
+ Rotation rotation;
+}
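For illustration, a client might populate this parcelable as below before requesting a camera stream; the resolution, frame rate, and usage flags are hypothetical values, not requirements of the HAL.

    #include <aidl/android/hardware/automotive/evs/Stream.h>
    #include <aidl/android/hardware/graphics/common/BufferUsage.h>
    #include <aidl/android/hardware/graphics/common/PixelFormat.h>

    #include <cstdint>

    namespace evs = ::aidl::android::hardware::automotive::evs;
    namespace gfx = ::aidl::android::hardware::graphics::common;

    // Illustrative configuration only: one 1280x720 RGBA output stream at 30 fps that the
    // consumer can sample as a GPU texture or hand to the composer.
    evs::Stream makeExampleStreamConfig() {
        evs::Stream cfg;
        cfg.id = 0;
        cfg.streamType = evs::StreamType::OUTPUT;
        cfg.width = 1280;
        cfg.height = 720;
        cfg.framerate = 30;
        cfg.format = gfx::PixelFormat::RGBA_8888;
        cfg.usage = static_cast<gfx::BufferUsage>(
                static_cast<int64_t>(gfx::BufferUsage::GPU_TEXTURE) |
                static_cast<int64_t>(gfx::BufferUsage::COMPOSER_OVERLAY));
        cfg.rotation = evs::Rotation::ROTATION_0;
        return cfg;
    }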
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/StreamType.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/StreamType.aidl
new file mode 100644
index 0000000..c028a5c
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/StreamType.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * StreamType:
+ *
+ * The type of the camera stream, which defines whether the EVS client device is
+ * the producer or the consumer for that stream, and how the buffers of the
+ * stream relate to the other streams.
+ */
+@VintfStability
+@Backing(type="int")
+enum StreamType {
+ /**
+ * This stream is an output stream; the EVS HAL device must fill buffers
+ * from this stream with newly captured or reprocessed image data.
+ */
+ OUTPUT = 0,
+
+ /**
+ * This stream is an input stream; the EVS HAL device must read buffers
+ * from this stream and send them through the camera processing pipeline,
+ * as if the buffer was a newly captured image from the imager.
+ */
+ INPUT = 1
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/Translation.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/Translation.aidl
new file mode 100644
index 0000000..14b14db
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/Translation.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+/**
+ * Structure for a translation with x, y and z components.
+ */
+@VintfStability
+parcelable Translation {
+ float x;
+ float y;
+ float z;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicSensor.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicSensor.aidl
new file mode 100644
index 0000000..712411b
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicSensor.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.SensorPose;
+
+/**
+ * Structure that contains all information about an ultrasonic sensor.
+ */
+@VintfStability
+parcelable UltrasonicSensor {
+ /**
+ * Pose provides the orientation and location of the ultrasonic sensor within the car.
+ * The +Y axis points along the center of the beam spread, the X axis to the right,
+ * and the Z axis in the up direction.
+ */
+ SensorPose pose;
+ /**
+ * Maximum range of the sensor in milli-metres.
+ */
+ float maxRangeMm;
+ /**
+ * Half-angle of the angle of measurement of the sensor, relative to the
+ * sensor’s x axis, in radians.
+ */
+ float angleOfMeasurement;
+}
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicsArrayDesc.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicsArrayDesc.aidl
new file mode 100644
index 0000000..d4f0663
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicsArrayDesc.aidl
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.automotive.evs.UltrasonicSensor;
+
+/**
+ * Structure that identifies and describes an ultrasonics array in the car.
+ *
+ * An ultrasonics array represents a group of ultrasonic sensors within the
+ * car. These may be sensors that are physically connected to the same hardware
+ * control unit or represent a logical group of sensors like front and back.
+ * The HAL is responsible for filling out this structure for each Ultrasonics
+ * Array.
+ */
+@VintfStability
+parcelable UltrasonicsArrayDesc {
+ /**
+ * Unique identifier for the ultrasonic array. This may be a path or name of the
+ * physical control device or a string identifying a logical group of sensors forming an array
+ * such as "front_array" and "back_array".
+ */
+ @utf8InCpp
+ String ultrasonicsArrayId;
+ /**
+ * Maximum number of readings (points on waveform) provided per sensor in
+ * each data frame. Used by client to pre-allocate required memory buffer for
+ * incoming data.
+ */
+ int maxReadingsPerSensorCount;
+ /**
+ * Maximum number of receiver sensors in a data frame. Must be between 1
+ * and the number of sensors in the array. Used by client to pre-allocate
+ * required memory buffer for incoming data.
+ */
+ int maxReceiversCount;
+ /**
+ * The sensors must be specified in clockwise order around the car, starting
+ * from the front left-most sensor.
+ */
+ UltrasonicSensor[] sensors;
+}
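As a sketch of the pre-allocation these two maximums enable, the hypothetical helper below computes the worst-case size of one data frame using the wire layout documented in UltrasonicsDataFrameDesc (next file): a 1-byte receiver id followed by (time of flight, resonance) float pairs for each reading.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical helper, not part of the HAL.
    size_t maxDataFrameSizeBytes(int32_t maxReceiversCount, int32_t maxReadingsPerSensorCount) {
        const size_t perWaveform = sizeof(uint8_t) +
                static_cast<size_t>(maxReadingsPerSensorCount) * 2 * sizeof(float);
        return static_cast<size_t>(maxReceiversCount) * perWaveform;
    }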
diff --git a/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.aidl b/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.aidl
new file mode 100644
index 0000000..e546db9
--- /dev/null
+++ b/automotive/evs/aidl/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.aidl
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs;
+
+import android.hardware.common.Ashmem;
+
+/**
+ * Structure that describes the data frame received from an ultrasonics array.
+ *
+ * Each data frame returned consists of received waveform signals from a subset
+ * of sensors in an array as indicated by the receiversIdList. The signal is
+ * transmitted at a particular time instant indicated by timestampNs from a
+ * subset of sensors in the array as provided in the transmittersIdList.
+ */
+@VintfStability
+parcelable UltrasonicsDataFrameDesc {
+ /**
+ * Timestamp of the start of the transmit signal for this data frame.
+ * Timestamp unit is nanoseconds and is obtained from android::elapsedRealtimeNanos().
+ * timeOfFlight readings are future-deltas to this timestamp.
+ */
+ long timestampNs;
+ /**
+ * Identifier of data frame. Used by implementation for managing multiple frames in flight.
+ */
+ int id;
+ /**
+ * List of indexes of sensors in range [0, sensorCount - 1] that
+ * transmitted the signal for this data frame.
+ */
+ byte[] transmittersIdList;
+ /**
+ * List of indexes of sensors in range [0, sensorCount - 1] that received
+ * the signal. The order of ids must match the order of the waveforms in the
+ * waveformsData.
+ * Size of the list is bounded above by maxReceiversCount.
+ */
+ byte[] receiversIdList;
+ /**
+ * List of the number of readings corresponding to each ultrasonics sensor in
+ * the receiversIdList. The order of the readings counts must match the order in
+ * receiversIdList, and each count is bounded above by maxReadingsPerSensorCount.
+ */
+ int[] receiversReadingsCountList;
+ /**
+ * Shared memory object containing the waveforms data. Contains one waveform
+ * for each sensor specified in receiversIdList, in order.
+ * Each waveform is represented by a number of readings, which are sample
+ * points on the waveform. The number of readings for each waveform is as
+ * specified in the receiversReadingsCountList.
+ * Each reading is a pair of time of flight and resonance.
+ * Time of flight (float): Time between transmit and receive signal in nanoseconds.
+ * Resonance (float): Resonance at time on waveform in range [0.0, 1.0].
+ *
+ * The structure of shared memory (example with 2 waveforms, each with 2 readings):
+ *
+ * Byte: | 0 | 1-4 | 5-8 | 9-12 | 13-16 || 17 | 18-21 | 22-25 | 26-29 | 30-33 |
+ * Data: | RecId1 | TOF1 | RES1 | TOF2 | RES2 || RecId2 | TOF1 | RES1 | TOF2 | RES2 |
+ * | Waveform1 || Waveform2 |
+ * Here:
+ * RecId : Receiver's Id. Order matches the receiversIdList, type uint8_t
+ * TOF : Time of flight, type float (4 bytes)
+ * RES : Resonance, type float (4 bytes)
+ * Note: All readings and waveforms are contiguous with no padding.
+ */
+ Ashmem waveformsData;
+}
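For illustration, a HAL implementation could pack waveforms into exactly this layout with a helper like the hypothetical one below; the WaveformData struct mirrors the one used by the VTS deserializer further below.

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct WaveformData {
        uint8_t receiverId;
        std::vector<std::pair<float, float>> readings;  // (time of flight, resonance)
    };

    // Hypothetical helper, not part of the HAL: serialize waveforms into the contiguous
    // byte layout documented above (receiver id, then one float pair per reading).
    std::vector<uint8_t> serializeWaveformData(const std::vector<WaveformData>& waveforms) {
        std::vector<uint8_t> out;
        for (const auto& w : waveforms) {
            out.push_back(w.receiverId);
            for (const auto& [tof, resonance] : w.readings) {
                const uint8_t* p = reinterpret_cast<const uint8_t*>(&tof);
                out.insert(out.end(), p, p + sizeof(float));
                p = reinterpret_cast<const uint8_t*>(&resonance);
                out.insert(out.end(), p, p + sizeof(float));
            }
        }
        return out;
    }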
diff --git a/automotive/evs/aidl/impl/Android.bp b/automotive/evs/aidl/impl/Android.bp
new file mode 100644
index 0000000..7eb0116
--- /dev/null
+++ b/automotive/evs/aidl/impl/Android.bp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_defaults {
+ name: "EvsHalDefaults",
+ static_libs: [
+ "android.hardware.automotive.evs-V1-ndk",
+ "android.hardware.common-V2-ndk",
+ "android.hardware.graphics.common-V3-ndk",
+ ],
+ shared_libs: [
+ "libbase",
+ "liblog",
+ "libutils",
+ ],
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wthread-safety",
+ ],
+}
diff --git a/automotive/evs/aidl/impl/default/Android.bp b/automotive/evs/aidl/impl/default/Android.bp
new file mode 100644
index 0000000..dbe0314
--- /dev/null
+++ b/automotive/evs/aidl/impl/default/Android.bp
@@ -0,0 +1,36 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_binary {
+ name: "android.hardware.automotive.evs-aidl-default-service",
+ defaults: ["EvsHalDefaults"],
+ local_include_dirs: ["include"],
+ vintf_fragments: ["evs-default-service.xml"],
+ init_rc: ["evs-default-service.rc"],
+ vendor: true,
+ relative_install_path: "hw",
+ srcs: ["src/*.cpp"],
+ shared_libs: [
+ "libbinder_ndk",
+ ],
+}
diff --git a/automotive/evs/aidl/impl/default/evs-default-service.rc b/automotive/evs/aidl/impl/default/evs-default-service.rc
new file mode 100644
index 0000000..ea8e689
--- /dev/null
+++ b/automotive/evs/aidl/impl/default/evs-default-service.rc
@@ -0,0 +1,5 @@
+service vendor.evs-hal-default /vendor/bin/hw/android.hardware.automotive.evs-aidl-default-service
+ class early_hal
+ user automotive_evs
+ group automotive_evs
+ disabled
diff --git a/automotive/evs/aidl/impl/default/evs-default-service.xml b/automotive/evs/aidl/impl/default/evs-default-service.xml
new file mode 100644
index 0000000..96ff9f6
--- /dev/null
+++ b/automotive/evs/aidl/impl/default/evs-default-service.xml
@@ -0,0 +1,11 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.automotive.evs</name>
+ <transport>hwbinder</transport>
+ <version>1</version>
+ <interface>
+ <name>IEvsEnumerator</name>
+ <instance>hw/0</instance>
+ </interface>
+ </hal>
+</manifest>
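For reference, a client resolves the instance declared here by its full AIDL name; a hypothetical sketch using the NDK service manager API:

    #include <aidl/android/hardware/automotive/evs/IEvsEnumerator.h>

    #include <android/binder_manager.h>

    #include <memory>
    #include <string>

    namespace evs = ::aidl::android::hardware::automotive::evs;

    // Hypothetical helper: wait for the "hw/0" instance declared in the manifest above and
    // return a typed proxy, or nullptr if the binder could not be resolved.
    std::shared_ptr<evs::IEvsEnumerator> getEvsEnumerator() {
        const std::string name = std::string(evs::IEvsEnumerator::descriptor) + "/hw/0";
        ::ndk::SpAIBinder binder(AServiceManager_waitForService(name.c_str()));
        return evs::IEvsEnumerator::fromBinder(binder);
    }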
diff --git a/automotive/evs/aidl/impl/default/include/DefaultEvsEnumerator.h b/automotive/evs/aidl/impl/default/include/DefaultEvsEnumerator.h
new file mode 100644
index 0000000..8bcd867
--- /dev/null
+++ b/automotive/evs/aidl/impl/default/include/DefaultEvsEnumerator.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef android_hardware_automotive_evs_aidl_impl_evshal_include_DefaultEvsHal_H_
+#define android_hardware_automotive_evs_aidl_impl_evshal_include_DefaultEvsHal_H_
+
+#include <aidl/android/hardware/automotive/evs/BnEvsEnumerator.h>
+
+namespace aidl::android::hardware::automotive::evs::implementation {
+
+class DefaultEvsEnumerator final
+ : public ::aidl::android::hardware::automotive::evs::BnEvsEnumerator {
+ ::ndk::ScopedAStatus isHardware(bool* flag) override;
+ ::ndk::ScopedAStatus openCamera(
+ const std::string& cameraId,
+ const ::aidl::android::hardware::automotive::evs::Stream& streamConfig,
+ std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsCamera>* obj) override;
+ ::ndk::ScopedAStatus closeCamera(
+ const std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsCamera>& obj)
+ override;
+ ::ndk::ScopedAStatus getCameraList(
+ std::vector<::aidl::android::hardware::automotive::evs::CameraDesc>* list) override;
+ ::ndk::ScopedAStatus getStreamList(
+ const ::aidl::android::hardware::automotive::evs::CameraDesc& desc,
+ std::vector<::aidl::android::hardware::automotive::evs::Stream>* _aidl_return) override;
+ ::ndk::ScopedAStatus openDisplay(
+ int8_t displayId,
+ std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsDisplay>* obj) override;
+ ::ndk::ScopedAStatus closeDisplay(
+ const std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsDisplay>& obj)
+ override;
+ ::ndk::ScopedAStatus getDisplayIdList(std::vector<uint8_t>* list) override;
+ ::ndk::ScopedAStatus getDisplayState(
+ ::aidl::android::hardware::automotive::evs::DisplayState* state) override;
+ ::ndk::ScopedAStatus registerStatusCallback(
+ const std::shared_ptr<
+ ::aidl::android::hardware::automotive::evs::IEvsEnumeratorStatusCallback>&
+ callback) override;
+ ::ndk::ScopedAStatus openUltrasonicsArray(
+ const std::string& id,
+ std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArray>* obj)
+ override;
+ ::ndk::ScopedAStatus closeUltrasonicsArray(
+ const std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArray>&
+ arr) override;
+ ::ndk::ScopedAStatus getUltrasonicsArrayList(
+ std::vector<::aidl::android::hardware::automotive::evs::UltrasonicsArrayDesc>* list)
+ override;
+};
+
+} // namespace aidl::android::hardware::automotive::evs::implementation
+
+#endif // android_hardware_automotive_evs_aidl_impl_evshal_include_DefaultEvsHal_H_
diff --git a/automotive/evs/aidl/impl/default/src/DefaultEvsEnumerator.cpp b/automotive/evs/aidl/impl/default/src/DefaultEvsEnumerator.cpp
new file mode 100644
index 0000000..2ff6d59
--- /dev/null
+++ b/automotive/evs/aidl/impl/default/src/DefaultEvsEnumerator.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO(b/203661081): Remove below lines to disable compiler warnings.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunused-parameter"
+
+#define LOG_TAG "DefaultEvsEnumerator"
+
+#include <DefaultEvsEnumerator.h>
+
+namespace aidl::android::hardware::automotive::evs::implementation {
+
+using ::ndk::ScopedAStatus;
+
+ScopedAStatus DefaultEvsEnumerator::isHardware(bool* flag) {
+ // This returns true always.
+ *flag = true;
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::openCamera(const std::string& cameraId,
+ const Stream& streamConfig,
+ std::shared_ptr<IEvsCamera>* obj) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::closeCamera(const std::shared_ptr<IEvsCamera>& obj) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::getCameraList(std::vector<CameraDesc>* list) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::getStreamList(const CameraDesc& desc,
+ std::vector<Stream>* _aidl_return) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::openDisplay(int8_t displayId,
+ std::shared_ptr<IEvsDisplay>* obj) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::closeDisplay(const std::shared_ptr<IEvsDisplay>& state) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::getDisplayIdList(std::vector<uint8_t>* list) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::getDisplayState(DisplayState* state) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::registerStatusCallback(
+ const std::shared_ptr<IEvsEnumeratorStatusCallback>& callback) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::openUltrasonicsArray(
+ const std::string& id, std::shared_ptr<IEvsUltrasonicsArray>* obj) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::closeUltrasonicsArray(
+ const std::shared_ptr<IEvsUltrasonicsArray>& obj) {
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus DefaultEvsEnumerator::getUltrasonicsArrayList(
+ std::vector<UltrasonicsArrayDesc>* list) {
+ return ScopedAStatus::ok();
+}
+
+} // namespace aidl::android::hardware::automotive::evs::implementation
+
+#pragma clang diagnostic pop
diff --git a/automotive/evs/aidl/impl/default/src/service.cpp b/automotive/evs/aidl/impl/default/src/service.cpp
new file mode 100644
index 0000000..0a0913f
--- /dev/null
+++ b/automotive/evs/aidl/impl/default/src/service.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EvsService"
+
+#include <DefaultEvsEnumerator.h>
+
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::automotive::evs::implementation::DefaultEvsEnumerator;
+
+int main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) {
+ std::shared_ptr<DefaultEvsEnumerator> vhal = ndk::SharedRefBase::make<DefaultEvsEnumerator>();
+
+ ALOGI("Registering as service...");
+ binder_exception_t err = AServiceManager_addService(
+         vhal->asBinder().get(), "android.hardware.automotive.evs.IEvsEnumerator/hw/0");
+ if (err != EX_NONE) {
+ ALOGE("failed to register android.hardware.automotive.evs service, exception: %d", err);
+ return 1;
+ }
+
+ if (!ABinderProcess_setThreadPoolMaxThreadCount(1)) {
+ ALOGE("%s", "failed to set thread pool max thread count");
+ return 1;
+ }
+ ABinderProcess_startThreadPool();
+
+ ALOGI("Evs Service Ready");
+
+ ABinderProcess_joinThreadPool();
+
+ ALOGI("Evs Service Exiting");
+
+ return 0;
+}
diff --git a/automotive/evs/aidl/vts/Android.bp b/automotive/evs/aidl/vts/Android.bp
new file mode 100644
index 0000000..980c6d5
--- /dev/null
+++ b/automotive/evs/aidl/vts/Android.bp
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_test {
+ name: "VtsHalEvsTargetTest",
+ srcs: [
+ "*.cpp",
+ ],
+ defaults: [
+ "VtsHalTargetTestDefaults",
+ "use_libaidlvintf_gtest_helper_static",
+ ],
+ shared_libs: [
+ "libbinder_ndk",
+ "libcamera_metadata",
+ "libui",
+ "libutils",
+ ],
+ static_libs: [
+ "android.hardware.automotive.evs@common-default-lib",
+ "android.hardware.automotive.evs-V1-ndk",
+ "android.hardware.common-V2-ndk",
+ "android.hardware.graphics.common-V3-ndk",
+ "libaidlcommonsupport",
+ ],
+ test_suites: [
+ "general-tests",
+ "vts",
+ ],
+}
diff --git a/automotive/evs/aidl/vts/FrameHandler.cpp b/automotive/evs/aidl/vts/FrameHandler.cpp
new file mode 100644
index 0000000..bab832b
--- /dev/null
+++ b/automotive/evs/aidl/vts/FrameHandler.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VtsHalEvsTest"
+
+#include "FrameHandler.h"
+#include "FormatConvert.h"
+
+#include <aidl/android/hardware/graphics/common/HardwareBufferDescription.h>
+#include <aidlcommonsupport/NativeHandle.h>
+#include <android-base/logging.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicBufferAllocator.h>
+
+using ::aidl::android::hardware::automotive::evs::BufferDesc;
+using ::aidl::android::hardware::automotive::evs::CameraDesc;
+using ::aidl::android::hardware::automotive::evs::EvsEventDesc;
+using ::aidl::android::hardware::automotive::evs::EvsEventType;
+using ::aidl::android::hardware::automotive::evs::IEvsCamera;
+using ::aidl::android::hardware::automotive::evs::IEvsDisplay;
+using ::aidl::android::hardware::graphics::common::HardwareBufferDescription;
+using ::ndk::ScopedAStatus;
+using std::chrono_literals::operator""s;
+
+FrameHandler::FrameHandler(const std::shared_ptr<IEvsCamera>& pCamera, const CameraDesc& cameraInfo,
+ const std::shared_ptr<IEvsDisplay>& pDisplay, BufferControlFlag mode)
+ : mCamera(pCamera), mCameraInfo(cameraInfo), mDisplay(pDisplay), mReturnMode(mode) {
+ // Nothing but member initialization here.
+}
+
+void FrameHandler::shutdown() {
+ // Make sure we're not still streaming
+ blockingStopStream();
+
+ // At this point, the receiver thread is no longer running, so we can safely drop
+ // our remote object references so they can be freed
+ mCamera = nullptr;
+ mDisplay = nullptr;
+}
+
+bool FrameHandler::startStream() {
+ // Tell the camera to start streaming
+ auto status = mCamera->startVideoStream(ref<FrameHandler>());
+ if (!status.isOk()) {
+ return false;
+ }
+
+ // Mark ourselves as running
+ mLock.lock();
+ mRunning = true;
+ mLock.unlock();
+
+ return true;
+}
+
+void FrameHandler::asyncStopStream() {
+ // Tell the camera to stop streaming.
+ // This will result in a null frame being delivered when the stream actually stops.
+ mCamera->stopVideoStream();
+}
+
+void FrameHandler::blockingStopStream() {
+ // Tell the stream to stop
+ asyncStopStream();
+
+ // Wait until the stream has actually stopped
+ std::unique_lock<std::mutex> lock(mEventLock);
+ if (mRunning) {
+ mEventSignal.wait(lock, [this]() { return !mRunning; });
+ }
+}
+
+bool FrameHandler::returnHeldBuffer() {
+ std::lock_guard<std::mutex> lock(mLock);
+
+ // Return the oldest buffer we're holding
+ if (mHeldBuffers.empty()) {
+ // No buffers are currently held
+ return false;
+ }
+
+ std::vector<BufferDesc> buffers = std::move(mHeldBuffers.front());
+ mHeldBuffers.pop();
+ mCamera->doneWithFrame(buffers);
+
+ return true;
+}
+
+bool FrameHandler::isRunning() {
+ std::lock_guard<std::mutex> lock(mLock);
+ return mRunning;
+}
+
+void FrameHandler::waitForFrameCount(unsigned frameCount) {
+ // Wait until we've seen at least the requested number of frames (could be more)
+ std::unique_lock<std::mutex> lock(mLock);
+ mFrameSignal.wait(lock, [this, frameCount]() { return mFramesReceived >= frameCount; });
+}
+
+void FrameHandler::getFramesCounters(unsigned* received, unsigned* displayed) {
+ std::lock_guard<std::mutex> lock(mLock);
+
+ if (received) {
+ *received = mFramesReceived;
+ }
+ if (displayed) {
+ *displayed = mFramesDisplayed;
+ }
+}
+
+ScopedAStatus FrameHandler::deliverFrame(const std::vector<BufferDesc>& buffers) {
+ mLock.lock();
+ // For VTS tests, FrameHandler uses a single frame among delivered frames.
+ auto bufferIdx = mFramesDisplayed % buffers.size();
+ auto& buffer = buffers[bufferIdx];
+ mLock.unlock();
+
+ // Store a dimension of a received frame.
+ mFrameWidth = buffer.buffer.description.width;
+ mFrameHeight = buffer.buffer.description.height;
+
+ // If we were given an opened display at construction time, then send the received
+ // image back down the camera.
+ bool displayed = false;
+ if (mDisplay) {
+ // Get the output buffer we'll use to display the imagery
+ BufferDesc tgtBuffer;
+ auto status = mDisplay->getTargetBuffer(&tgtBuffer);
+ if (!status.isOk()) {
+ printf("Didn't get target buffer - frame lost\n");
+ LOG(ERROR) << "Didn't get requested output buffer -- skipping this frame.";
+ } else {
+ // Copy the contents of the source buffer into tgtBuffer
+ copyBufferContents(tgtBuffer, buffer);
+
+ // Send the target buffer back for display
+ auto status = mDisplay->returnTargetBufferForDisplay(tgtBuffer);
+ if (!status.isOk()) {
+ printf("AIDL error on display buffer (%d)- frame lost\n",
+ status.getServiceSpecificError());
+ LOG(ERROR) << "Error making the remote function call. AIDL said "
+ << status.getServiceSpecificError();
+ } else {
+ // Everything looks good!
+ // Keep track so tests or watch dogs can monitor progress
+ displayed = true;
+ }
+ }
+ }
+
+ mLock.lock();
+ // Increase the counters
+ ++mFramesReceived;
+ mFramesDisplayed += (int)displayed;
+ mLock.unlock();
+ mFrameSignal.notify_all();
+
+ switch (mReturnMode) {
+ case eAutoReturn:
+ // Send the camera buffer back now that the client has seen it
+ LOG(DEBUG) << "Calling doneWithFrame";
+ mCamera->doneWithFrame(buffers);
+ break;
+ case eNoAutoReturn:
+ // Hang onto the buffer handles for now -- the client will return them explicitly later
+ // mHeldBuffers.push(buffers);
+ break;
+ }
+
+ LOG(DEBUG) << "Frame handling complete";
+ return ScopedAStatus::ok();
+}
+
+ScopedAStatus FrameHandler::notify(const EvsEventDesc& event) {
+ // Local flag we use to keep track of when the stream is stopping
+ std::unique_lock<std::mutex> lock(mEventLock);
+ mLatestEventDesc.aType = event.aType;
+ mLatestEventDesc.payload[0] = event.payload[0];
+ mLatestEventDesc.payload[1] = event.payload[1];
+ if (mLatestEventDesc.aType == EvsEventType::STREAM_STOPPED) {
+ // Signal that the last frame has been received and the stream is stopped
+ mRunning = false;
+ } else if (mLatestEventDesc.aType == EvsEventType::PARAMETER_CHANGED) {
+ LOG(DEBUG) << "Camera parameter " << mLatestEventDesc.payload[0] << " is changed to "
+ << mLatestEventDesc.payload[1];
+ } else {
+ LOG(DEBUG) << "Received an event " << eventToString(mLatestEventDesc.aType);
+ }
+ lock.unlock();
+ mEventSignal.notify_one();
+
+ return ScopedAStatus::ok();
+}
+
+bool FrameHandler::copyBufferContents(const BufferDesc& tgtBuffer, const BufferDesc& srcBuffer) {
+ bool success = true;
+ const HardwareBufferDescription* pSrcDesc =
+ reinterpret_cast<const HardwareBufferDescription*>(&srcBuffer.buffer.description);
+ const HardwareBufferDescription* pTgtDesc =
+ reinterpret_cast<const HardwareBufferDescription*>(&tgtBuffer.buffer.description);
+
+ // Make sure we don't run off the end of either buffer
+ const unsigned width = std::min(pTgtDesc->width, pSrcDesc->width);
+ const unsigned height = std::min(pTgtDesc->height, pSrcDesc->height);
+
+ // FIXME: We duplicate file descriptors twice below; consider using TAKE_HANDLE
+ // instead of CLONE_HANDLE.
+ buffer_handle_t target = ::android::dupFromAidl(tgtBuffer.buffer.handle);
+ ::android::sp<android::GraphicBuffer> tgt = new android::GraphicBuffer(
+ target, android::GraphicBuffer::CLONE_HANDLE, pTgtDesc->width, pTgtDesc->height,
+ static_cast<android::PixelFormat>(pTgtDesc->format), pTgtDesc->layers,
+ static_cast<uint64_t>(pTgtDesc->usage), pTgtDesc->stride);
+
+ buffer_handle_t source = ::android::dupFromAidl(srcBuffer.buffer.handle);
+ ::android::sp<android::GraphicBuffer> src = new android::GraphicBuffer(
+ source, android::GraphicBuffer::CLONE_HANDLE, pSrcDesc->width, pSrcDesc->height,
+ static_cast<android::PixelFormat>(pSrcDesc->format), pSrcDesc->layers,
+ static_cast<uint64_t>(pSrcDesc->usage), pSrcDesc->stride);
+
+ // Lock our source buffer for reading (current expectation are for this to be NV21 format)
+ uint8_t* srcPixels = nullptr;
+ src->lock(GRALLOC_USAGE_SW_READ_OFTEN, (void**)&srcPixels);
+
+ // Lock our target buffer for writing (should be either RGBA8888 or BGRA8888 format)
+ uint32_t* tgtPixels = nullptr;
+ tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)&tgtPixels);
+
+ if (srcPixels && tgtPixels) {
+ using namespace ::android::hardware::automotive::evs::common;
+ if (static_cast<android_pixel_format_t>(pTgtDesc->format) == HAL_PIXEL_FORMAT_RGBA_8888) {
+ if (static_cast<android_pixel_format_t>(pSrcDesc->format) ==
+ HAL_PIXEL_FORMAT_YCRCB_420_SP) { // 420SP == NV21
+ Utils::copyNV21toRGB32(width, height, srcPixels, tgtPixels, pTgtDesc->stride);
+ } else if (static_cast<android_pixel_format_t>(pSrcDesc->format) ==
+ HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
+ Utils::copyYV12toRGB32(width, height, srcPixels, tgtPixels, pTgtDesc->stride);
+ } else if (static_cast<android_pixel_format_t>(pSrcDesc->format) ==
+ HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
+ Utils::copyYUYVtoRGB32(width, height, srcPixels, pSrcDesc->stride, tgtPixels,
+ pTgtDesc->stride);
+ } else if (pSrcDesc->format == pTgtDesc->format) { // 32bit RGBA
+ Utils::copyMatchedInterleavedFormats(width, height, srcPixels, pSrcDesc->stride,
+ tgtPixels, pTgtDesc->stride,
+ tgtBuffer.pixelSizeBytes);
+ } else {
+ LOG(ERROR) << "Camera buffer format is not supported";
+ success = false;
+ }
+ } else if (static_cast<android_pixel_format_t>(pTgtDesc->format) ==
+ HAL_PIXEL_FORMAT_BGRA_8888) {
+ if (static_cast<android_pixel_format_t>(pSrcDesc->format) ==
+ HAL_PIXEL_FORMAT_YCRCB_420_SP) { // 420SP == NV21
+ Utils::copyNV21toBGR32(width, height, srcPixels, tgtPixels, pTgtDesc->stride);
+ } else if (static_cast<android_pixel_format_t>(pSrcDesc->format) ==
+ HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
+ Utils::copyYV12toBGR32(width, height, srcPixels, tgtPixels, pTgtDesc->stride);
+ } else if (static_cast<android_pixel_format_t>(pSrcDesc->format) ==
+ HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
+ Utils::copyYUYVtoBGR32(width, height, srcPixels, pSrcDesc->stride, tgtPixels,
+ pTgtDesc->stride);
+ } else if (pSrcDesc->format == pTgtDesc->format) { // 32bit RGBA
+ Utils::copyMatchedInterleavedFormats(width, height, srcPixels, pSrcDesc->stride,
+ tgtPixels, pTgtDesc->stride,
+ tgtBuffer.pixelSizeBytes);
+ } else {
+ LOG(ERROR) << "Camera buffer format is not supported";
+ success = false;
+ }
+ } else {
+ // We always expect 32 bit RGB for the display output for now. Is there a need for 565?
+ LOG(ERROR) << "Diplay buffer is always expected to be 32bit RGBA";
+ success = false;
+ }
+ } else {
+ LOG(ERROR) << "Failed to lock buffer contents for contents transfer";
+ success = false;
+ }
+
+ if (srcPixels) {
+ src->unlock();
+ }
+ if (tgtPixels) {
+ tgt->unlock();
+ }
+
+ return success;
+}
+
+void FrameHandler::getFrameDimension(unsigned* width, unsigned* height) {
+ if (width) {
+ *width = mFrameWidth;
+ }
+
+ if (height) {
+ *height = mFrameHeight;
+ }
+}
+
+bool FrameHandler::waitForEvent(const EvsEventDesc& aTargetEvent, EvsEventDesc& aReceivedEvent,
+ bool ignorePayload) {
+ // Wait until we get an expected parameter change event.
+ std::unique_lock<std::mutex> lock(mEventLock);
+ auto now = std::chrono::system_clock::now();
+ bool found = false;
+ while (!found) {
+ bool result = mEventSignal.wait_until(
+ lock, now + 5s, [this, aTargetEvent, ignorePayload, &aReceivedEvent, &found]() {
+ found = (mLatestEventDesc.aType == aTargetEvent.aType) &&
+ (ignorePayload ||
+ (mLatestEventDesc.payload[0] == aTargetEvent.payload[0] &&
+ mLatestEventDesc.payload[1] == aTargetEvent.payload[1]));
+
+ aReceivedEvent.aType = mLatestEventDesc.aType;
+ aReceivedEvent.payload[0] = mLatestEventDesc.payload[0];
+ aReceivedEvent.payload[1] = mLatestEventDesc.payload[1];
+ return found;
+ });
+
+ if (!result) {
+ LOG(WARNING) << "A timer is expired before a target event has happened.";
+ break;
+ }
+ }
+
+ return found;
+}
+
+const char* FrameHandler::eventToString(const EvsEventType aType) {
+ switch (aType) {
+ case EvsEventType::STREAM_STARTED:
+ return "STREAM_STARTED";
+ case EvsEventType::STREAM_STOPPED:
+ return "STREAM_STOPPED";
+ case EvsEventType::FRAME_DROPPED:
+ return "FRAME_DROPPED";
+ case EvsEventType::TIMEOUT:
+ return "TIMEOUT";
+ case EvsEventType::PARAMETER_CHANGED:
+ return "PARAMETER_CHANGED";
+ case EvsEventType::MASTER_RELEASED:
+ return "MASTER_RELEASED";
+ default:
+ return "Unknown";
+ }
+}
diff --git a/automotive/evs/aidl/vts/FrameHandler.h b/automotive/evs/aidl/vts/FrameHandler.h
new file mode 100644
index 0000000..0b959ab
--- /dev/null
+++ b/automotive/evs/aidl/vts/FrameHandler.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTOMOTIVE_EVS_VTS_FRAMEHANDLER_H
+#define AUTOMOTIVE_EVS_VTS_FRAMEHANDLER_H
+
+#include <aidl/android/hardware/automotive/evs/BnEvsCameraStream.h>
+#include <aidl/android/hardware/automotive/evs/EvsEventDesc.h>
+#include <aidl/android/hardware/automotive/evs/IEvsCamera.h>
+#include <aidl/android/hardware/automotive/evs/IEvsDisplay.h>
+
+#include <mutex>
+#include <queue>
+
+/*
+ * FrameHandler:
+ * This class can be used to receive camera imagery from an IEvsCamera implementation. Given an
+ * IEvsDisplay instance at startup, it will forward the received imagery to the display,
+ * providing a trivial implementation of a rear view camera type application.
+ * Note that the video frames are delivered on a background thread, while the control interface
+ * is actuated from the application's foreground thread.
+ */
+class FrameHandler : public ::aidl::android::hardware::automotive::evs::BnEvsCameraStream {
+ public:
+ enum BufferControlFlag {
+ eAutoReturn,
+ eNoAutoReturn,
+ };
+
+ FrameHandler(
+ const std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsCamera>& pCamera,
+ const ::aidl::android::hardware::automotive::evs::CameraDesc& cameraInfo,
+ const std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsDisplay>&
+ pDisplay,
+ BufferControlFlag mode = eAutoReturn);
+ virtual ~FrameHandler() {
+ if (mCamera != nullptr) {
+ /* shutdown a camera explicitly */
+ shutdown();
+ }
+ }
+
+ void shutdown();
+ bool startStream();
+ void asyncStopStream();
+ void blockingStopStream();
+ bool returnHeldBuffer();
+ bool isRunning();
+ void waitForFrameCount(unsigned frameCount);
+ bool waitForEvent(const ::aidl::android::hardware::automotive::evs::EvsEventDesc& aTargetEvent,
+ ::aidl::android::hardware::automotive::evs::EvsEventDesc& aReceivedEvent,
+ bool ignorePayload = false);
+ void getFramesCounters(unsigned* received, unsigned* displayed);
+ void getFrameDimension(unsigned* width, unsigned* height);
+
+ private:
+ // Methods from ::aidl::android::hardware::automotive::evs::IEvsCameraStream follow.
+ ::ndk::ScopedAStatus deliverFrame(
+ const std::vector<::aidl::android::hardware::automotive::evs::BufferDesc>& buffer)
+ override;
+ ::ndk::ScopedAStatus notify(
+ const ::aidl::android::hardware::automotive::evs::EvsEventDesc& event) override;
+
+ // Local implementation details
+ bool copyBufferContents(
+ const ::aidl::android::hardware::automotive::evs::BufferDesc& tgtBuffer,
+ const ::aidl::android::hardware::automotive::evs::BufferDesc& srcBuffer);
+ const char* eventToString(const ::aidl::android::hardware::automotive::evs::EvsEventType aType);
+
+ std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsCamera> mCamera;
+ ::aidl::android::hardware::automotive::evs::CameraDesc mCameraInfo;
+ std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsDisplay> mDisplay;
+ BufferControlFlag mReturnMode;
+
+ // Since we get frames delivered to us asynchronously via the IEvsCameraStream interface,
+ // we need to protect all member variables that may be modified while we're streaming
+ // (ie: those below)
+ std::mutex mLock;
+ std::mutex mEventLock;
+ std::condition_variable mEventSignal;
+ std::condition_variable mFrameSignal;
+ std::queue<std::vector<::aidl::android::hardware::automotive::evs::BufferDesc>> mHeldBuffers;
+
+ bool mRunning = false;
+ unsigned mFramesReceived = 0; // Simple counter -- rolls over eventually!
+ unsigned mFramesDisplayed = 0; // Simple counter -- rolls over eventually!
+ unsigned mFrameWidth = 0;
+ unsigned mFrameHeight = 0;
+ ::aidl::android::hardware::automotive::evs::EvsEventDesc mLatestEventDesc;
+};
+
+#endif // AUTOMOTIVE_EVS_VTS_FRAMEHANDLER_H
diff --git a/automotive/evs/aidl/vts/FrameHandlerUltrasonics.cpp b/automotive/evs/aidl/vts/FrameHandlerUltrasonics.cpp
new file mode 100644
index 0000000..650f0ed
--- /dev/null
+++ b/automotive/evs/aidl/vts/FrameHandlerUltrasonics.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FrameHandlerUltrasonics.h"
+
+#include <aidl/android/hardware/automotive/evs/EvsEventDesc.h>
+#include <aidl/android/hardware/automotive/evs/EvsEventType.h>
+#include <aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.h>
+#include <aidl/android/hardware/automotive/evs/UltrasonicsDataFrameDesc.h>
+#include <android-base/logging.h>
+
+using ::aidl::android::hardware::automotive::evs::EvsEventDesc;
+using ::aidl::android::hardware::automotive::evs::EvsEventType;
+using ::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArray;
+using ::aidl::android::hardware::automotive::evs::UltrasonicsDataFrameDesc;
+using ::ndk::ScopedAStatus;
+
+namespace {
+
+// Struct used by SerializeWaveformData().
+struct WaveformData {
+ uint8_t receiverId;
+ std::vector<std::pair<float, float>> readings;
+};
+
+} // namespace
+
+FrameHandlerUltrasonics::FrameHandlerUltrasonics(
+ const std::shared_ptr<IEvsUltrasonicsArray>& pArray)
+ : mEvsUltrasonicsArray(pArray), mReceiveFramesCount(0) {
+ // Nothing but member initialization
+}
+
+ScopedAStatus FrameHandlerUltrasonics::notify(const EvsEventDesc& evsEvent) {
+ switch (evsEvent.aType) {
+ case EvsEventType::STREAM_STARTED:
+ case EvsEventType::STREAM_STOPPED:
+ case EvsEventType::FRAME_DROPPED:
+ case EvsEventType::TIMEOUT:
+ mReceivedEvents.emplace_back(evsEvent);
+ break;
+ default:
+ LOG(ERROR) << "Received unexpected event";
+ }
+
+ return ScopedAStatus::ok();
+}
+
+// De-serializes shared memory to vector of WaveformData.
+// TODO(b/149950362): Add a common library for serializing and deserializing waveform data.
+std::vector<WaveformData> DeSerializeWaveformData(std::vector<uint32_t> recvReadingsCountList,
+ uint8_t* pData) {
+ std::vector<WaveformData> waveformDataList(recvReadingsCountList.size());
+
+ for (int i = 0; i < waveformDataList.size(); i++) {
+ // Set Id
+ memcpy(&waveformDataList[i].receiverId, pData, sizeof(uint8_t));
+ pData += sizeof(uint8_t);
+
+ waveformDataList[i].readings.resize(recvReadingsCountList[i]);
+
+ for (auto& reading : waveformDataList[i].readings) {
+ // Set the time of flight.
+ memcpy(&reading.first, pData, sizeof(float));
+ pData += sizeof(float);
+
+ // Set the resonance.
+ memcpy(&reading.second, pData, sizeof(float));
+ pData += sizeof(float);
+ }
+ }
+ return waveformDataList;
+}
+
+bool DataFrameValidator(const UltrasonicsDataFrameDesc& /*dataFrameDesc*/) {
+ // TODO(b/214026378): implement a method to validate an ultrasonics data frame
+ (void)DeSerializeWaveformData;
+ return true;
+}
+
+ScopedAStatus FrameHandlerUltrasonics::deliverDataFrame(
+ const UltrasonicsDataFrameDesc& dataFrameDesc) {
+ LOG(DEBUG) << "FrameHandlerUltrasonics::receiveFrames";
+
+ mReceiveFramesCount++;
+
+ if (!DataFrameValidator(dataFrameDesc)) {
+ mAllFramesValid = false;
+ }
+
+ // Send done with data frame.
+ mEvsUltrasonicsArray->doneWithDataFrame(dataFrameDesc);
+ return ScopedAStatus::ok();
+}
+
+bool FrameHandlerUltrasonics::checkEventReceived(const EvsEventDesc& evsEvent) {
+ LOG(DEBUG) << "FrameHandlerUltrasonics::checkEventReceived";
+ int size = mReceivedEvents.size(); // work around
+ LOG(DEBUG) << "Received event number: " << size;
+ auto iter = find(mReceivedEvents.begin(), mReceivedEvents.end(), evsEvent);
+ return iter != mReceivedEvents.end();
+}
+
+int FrameHandlerUltrasonics::getReceiveFramesCount() {
+ return mReceiveFramesCount;
+}
+
+bool FrameHandlerUltrasonics::areAllFramesValid() {
+ return mAllFramesValid;
+}
diff --git a/automotive/evs/aidl/vts/FrameHandlerUltrasonics.h b/automotive/evs/aidl/vts/FrameHandlerUltrasonics.h
new file mode 100644
index 0000000..f853a00
--- /dev/null
+++ b/automotive/evs/aidl/vts/FrameHandlerUltrasonics.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTOMOTIVE_EVS_VTS_FRAMEHANDLERULTRASONICS_H
+#define AUTOMOTIVE_EVS_VTS_FRAMEHANDLERULTRASONICS_H
+
+#include <aidl/android/hardware/automotive/evs/BnEvsUltrasonicsArrayStream.h>
+#include <aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.h>
+
+#include <vector>
+
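+// Receives data frames and stream events from the IEvsUltrasonicsArray under test
+// and records them so test cases can verify what was delivered.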
+class FrameHandlerUltrasonics
+ : public ::aidl::android::hardware::automotive::evs::BnEvsUltrasonicsArrayStream {
+ public:
+ FrameHandlerUltrasonics(
+ const std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArray>&
+ pArray);
+
+ // Implementation for ::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArrayStream
+ ::ndk::ScopedAStatus notify(
+ const ::aidl::android::hardware::automotive::evs::EvsEventDesc& event) override;
+ ::ndk::ScopedAStatus deliverDataFrame(
+ const ::aidl::android::hardware::automotive::evs::UltrasonicsDataFrameDesc& desc)
+ override;
+
+ bool checkEventReceived(
+ const ::aidl::android::hardware::automotive::evs::EvsEventDesc& evsEvent);
+ int getReceiveFramesCount();
+ bool areAllFramesValid();
+
+ private:
+ std::shared_ptr<::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArray>
+ mEvsUltrasonicsArray;
+ std::vector<::aidl::android::hardware::automotive::evs::EvsEventDesc> mReceivedEvents;
+ int mReceiveFramesCount;
+ bool mAllFramesValid = true;
+};
+
+#endif // AUTOMOTIVE_EVS_VTS_FRAMEHANDLERULTRASONICS_H
diff --git a/automotive/evs/aidl/vts/OWNERS b/automotive/evs/aidl/vts/OWNERS
new file mode 100644
index 0000000..a104f50
--- /dev/null
+++ b/automotive/evs/aidl/vts/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 853002
+ankitarora@google.com
+changyeon@google.com
diff --git a/automotive/evs/aidl/vts/VtsHalEvsTargetTest.cpp b/automotive/evs/aidl/vts/VtsHalEvsTargetTest.cpp
new file mode 100644
index 0000000..c709d40
--- /dev/null
+++ b/automotive/evs/aidl/vts/VtsHalEvsTargetTest.cpp
@@ -0,0 +1,2170 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FrameHandler.h"
+#include "FrameHandlerUltrasonics.h"
+
+#include <aidl/Gtest.h>
+#include <aidl/Vintf.h>
+#include <aidl/android/hardware/automotive/evs/BufferDesc.h>
+#include <aidl/android/hardware/automotive/evs/CameraDesc.h>
+#include <aidl/android/hardware/automotive/evs/CameraParam.h>
+#include <aidl/android/hardware/automotive/evs/DisplayDesc.h>
+#include <aidl/android/hardware/automotive/evs/DisplayState.h>
+#include <aidl/android/hardware/automotive/evs/EvsEventDesc.h>
+#include <aidl/android/hardware/automotive/evs/EvsEventType.h>
+#include <aidl/android/hardware/automotive/evs/EvsResult.h>
+#include <aidl/android/hardware/automotive/evs/IEvsCamera.h>
+#include <aidl/android/hardware/automotive/evs/IEvsDisplay.h>
+#include <aidl/android/hardware/automotive/evs/IEvsEnumerator.h>
+#include <aidl/android/hardware/automotive/evs/IEvsUltrasonicsArray.h>
+#include <aidl/android/hardware/automotive/evs/ParameterRange.h>
+#include <aidl/android/hardware/automotive/evs/Stream.h>
+#include <aidl/android/hardware/automotive/evs/UltrasonicsArrayDesc.h>
+#include <aidl/android/hardware/common/NativeHandle.h>
+#include <aidl/android/hardware/graphics/common/HardwareBufferDescription.h>
+#include <aidl/android/hardware/graphics/common/PixelFormat.h>
+#include <aidlcommonsupport/NativeHandle.h>
+#include <android-base/logging.h>
+#include <android/binder_ibinder.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <android/binder_status.h>
+#include <system/camera_metadata.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicBufferAllocator.h>
+#include <utils/Timers.h>
+
+#include <deque>
+#include <thread>
+#include <unordered_set>
+
+namespace {
+
+// These values are called out in the EVS design doc (as of Mar 8, 2017)
+constexpr int kMaxStreamStartMilliseconds = 500;
+constexpr int kMinimumFramesPerSecond = 10;
+constexpr int kSecondsToMilliseconds = 1000;
+constexpr int kMillisecondsToMicroseconds = 1000;
+constexpr float kNanoToMilliseconds = 0.000001f;
+constexpr float kNanoToSeconds = 0.000000001f;
+
+/*
+ * Please note that this is different from what is defined in
+ * libhardware/modules/camera/3_4/metadata/types.h; this has one additional
+ * field to store a framerate.
+ */
+typedef struct {
+ int32_t id;
+ int32_t width;
+ int32_t height;
+ int32_t format;
+ int32_t direction;
+ int32_t framerate;
+} RawStreamConfig;
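+// Number of int32_t entries that make up one RawStreamConfig record.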
+constexpr size_t kStreamCfgSz = sizeof(RawStreamConfig) / sizeof(int32_t);
+
+} // namespace
+
+using ::aidl::android::hardware::automotive::evs::BufferDesc;
+using ::aidl::android::hardware::automotive::evs::CameraDesc;
+using ::aidl::android::hardware::automotive::evs::CameraParam;
+using ::aidl::android::hardware::automotive::evs::DisplayDesc;
+using ::aidl::android::hardware::automotive::evs::DisplayState;
+using ::aidl::android::hardware::automotive::evs::EvsEventDesc;
+using ::aidl::android::hardware::automotive::evs::EvsEventType;
+using ::aidl::android::hardware::automotive::evs::EvsResult;
+using ::aidl::android::hardware::automotive::evs::IEvsCamera;
+using ::aidl::android::hardware::automotive::evs::IEvsDisplay;
+using ::aidl::android::hardware::automotive::evs::IEvsEnumerator;
+using ::aidl::android::hardware::automotive::evs::IEvsUltrasonicsArray;
+using ::aidl::android::hardware::automotive::evs::ParameterRange;
+using ::aidl::android::hardware::automotive::evs::Stream;
+using ::aidl::android::hardware::automotive::evs::UltrasonicsArrayDesc;
+using ::aidl::android::hardware::graphics::common::BufferUsage;
+using ::aidl::android::hardware::graphics::common::HardwareBufferDescription;
+using ::aidl::android::hardware::graphics::common::PixelFormat;
+using std::chrono_literals::operator""s;
+
+// The main test class for EVS
+class EvsAidlTest : public ::testing::TestWithParam<std::string> {
+ public:
+ virtual void SetUp() override {
+ // Make sure we can connect to the enumerator
+ std::string service_name = GetParam();
+ AIBinder* binder = AServiceManager_waitForService(service_name.data());
+ ASSERT_NE(binder, nullptr);
+ mEnumerator = IEvsEnumerator::fromBinder(::ndk::SpAIBinder(binder));
+ LOG(INFO) << "Test target service: " << service_name;
+
+ ASSERT_TRUE(mEnumerator->isHardware(&mIsHwModule).isOk());
+ }
+
+ virtual void TearDown() override {
+ // Attempt to close any active camera
+ for (auto&& cam : mActiveCameras) {
+ if (cam != nullptr) {
+ mEnumerator->closeCamera(cam);
+ }
+ }
+ mActiveCameras.clear();
+ }
+
+ protected:
+ void loadCameraList() {
+ // SetUp() must run first!
+ ASSERT_NE(mEnumerator, nullptr);
+
+ // Get the camera list
+ ASSERT_TRUE(mEnumerator->getCameraList(&mCameraInfo).isOk())
+ << "Failed to get a list of available cameras";
+ LOG(INFO) << "We have " << mCameraInfo.size() << " cameras.";
+ }
+
+ void loadUltrasonicsArrayList() {
+ // SetUp() must run first!
+ ASSERT_NE(mEnumerator, nullptr);
+
+ // Get the ultrasonics array list
+ ASSERT_TRUE(mEnumerator->getUltrasonicsArrayList(&mUltrasonicsArraysInfo).isOk())
+ << "Failed to get a list of available ultrasonics arrays";
+        LOG(INFO) << "We have " << mUltrasonicsArraysInfo.size() << " ultrasonics arrays.";
+ }
+
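+    // Returns true when the camera metadata advertises the LOGICAL_MULTI_CAMERA capability.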
+ bool isLogicalCamera(const camera_metadata_t* metadata) {
+ if (metadata == nullptr) {
+ // A logical camera device must have a valid camera metadata.
+ return false;
+ }
+
+ // Looking for LOGICAL_MULTI_CAMERA capability from metadata.
+ camera_metadata_ro_entry_t entry;
+ int rc = find_camera_metadata_ro_entry(metadata, ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
+ &entry);
+ if (rc != 0) {
+ // No capabilities are found.
+ return false;
+ }
+
+ for (size_t i = 0; i < entry.count; ++i) {
+ uint8_t cap = entry.data.u8[i];
+ if (cap == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
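+    // Returns the set of physical camera IDs that back the camera 'id'; sets
+    // 'flag' to true when 'id' refers to a logical camera device.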
+ std::unordered_set<std::string> getPhysicalCameraIds(const std::string& id, bool& flag) {
+ std::unordered_set<std::string> physicalCameras;
+ const auto it = std::find_if(mCameraInfo.begin(), mCameraInfo.end(),
+ [&id](const CameraDesc& desc) { return id == desc.id; });
+ if (it == mCameraInfo.end()) {
+ // Unknown camera is requested. Return an empty list.
+ return physicalCameras;
+ }
+
+ const camera_metadata_t* metadata = reinterpret_cast<camera_metadata_t*>(&it->metadata[0]);
+ flag = isLogicalCamera(metadata);
+ if (!flag) {
+            // EVS assumes that a device without valid metadata is a physical
+            // device.
+ LOG(INFO) << id << " is not a logical camera device.";
+ physicalCameras.insert(id);
+ return physicalCameras;
+ }
+
+ // Look for physical camera identifiers
+ camera_metadata_ro_entry entry;
+ int rc = find_camera_metadata_ro_entry(metadata, ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS,
+ &entry);
+ if (rc != 0) {
+ LOG(ERROR) << "No physical camera ID is found for a logical camera device";
+ }
+
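+        // Physical camera IDs are packed as NUL-separated strings in a single byte array.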
+ const uint8_t* ids = entry.data.u8;
+ size_t start = 0;
+ for (size_t i = 0; i < entry.count; ++i) {
+ if (ids[i] == '\0') {
+ if (start != i) {
+ std::string id(reinterpret_cast<const char*>(ids + start));
+ physicalCameras.insert(id);
+ }
+ start = i + 1;
+ }
+ }
+
+ LOG(INFO) << id << " consists of " << physicalCameras.size() << " physical camera devices";
+ return physicalCameras;
+ }
+
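+    // Returns the first RGBA_8888 output stream configuration found in the
+    // camera metadata, or a default-initialized Stream if none is found.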
+ Stream getFirstStreamConfiguration(camera_metadata_t* metadata) {
+ Stream targetCfg = {};
+ camera_metadata_entry_t streamCfgs;
+ if (!find_camera_metadata_entry(metadata, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ &streamCfgs)) {
+ // Stream configurations are found in metadata
+ RawStreamConfig* ptr = reinterpret_cast<RawStreamConfig*>(streamCfgs.data.i32);
+ for (unsigned offset = 0; offset < streamCfgs.count; offset += kStreamCfgSz) {
+ if (ptr->direction == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ ptr->format == HAL_PIXEL_FORMAT_RGBA_8888) {
+ targetCfg.width = ptr->width;
+ targetCfg.height = ptr->height;
+ targetCfg.format = static_cast<PixelFormat>(ptr->format);
+ break;
+ }
+ ++ptr;
+ }
+ }
+
+ return targetCfg;
+ }
+
+ // Every test needs access to the service
+ std::shared_ptr<IEvsEnumerator> mEnumerator;
+    // Empty unless/until loadCameraList() is called
+ std::vector<CameraDesc> mCameraInfo;
+    // True when the module under test is a HW module implementation
+ bool mIsHwModule;
+    // A list of active camera handles that need to be cleaned up
+ std::deque<std::shared_ptr<IEvsCamera>> mActiveCameras;
+    // Empty unless/until loadUltrasonicsArrayList() is called
+ std::vector<UltrasonicsArrayDesc> mUltrasonicsArraysInfo;
+ // A list of active ultrasonics array handles that are to be cleaned up
+ std::deque<std::weak_ptr<IEvsUltrasonicsArray>> mActiveUltrasonicsArrays;
+};
+
+// Test cases, their implementations, and corresponding requirements are
+// documented at go/aae-evs-public-api-test.
+
+/*
+ * CameraOpenClean:
+ * Opens each camera reported by the enumerator and then explicitly closes it via a
+ * call to closeCamera. Then repeats the test to ensure all cameras can be reopened.
+ */
+TEST_P(EvsAidlTest, CameraOpenClean) {
+ LOG(INFO) << "Starting CameraOpenClean test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Open and close each camera twice
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ auto devices = getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (mIsHwModule && isLogicalCam) {
+ LOG(INFO) << "Skip a logical device, " << cam.id << " for HW target.";
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ for (int pass = 0; pass < 2; pass++) {
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ ASSERT_NE(pCam, nullptr);
+
+ CameraDesc cameraInfo;
+ for (auto&& devName : devices) {
+ ASSERT_TRUE(pCam->getPhysicalCameraInfo(devName, &cameraInfo).isOk());
+ EXPECT_EQ(devName, cameraInfo.id);
+ }
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Verify that this camera self-identifies correctly
+ ASSERT_TRUE(pCam->getCameraInfo(&cameraInfo).isOk());
+ EXPECT_EQ(cam.id, cameraInfo.id);
+
+ // Verify methods for extended info
+ const auto id = 0xFFFFFFFF; // meaningless id
+ std::vector<uint8_t> values;
+ auto status = pCam->setExtendedInfo(id, values);
+ if (isLogicalCam) {
+ EXPECT_TRUE(!status.isOk() && status.getServiceSpecificError() ==
+ static_cast<int>(EvsResult::NOT_SUPPORTED));
+ } else {
+ EXPECT_TRUE(status.isOk());
+ }
+
+ status = pCam->getExtendedInfo(id, &values);
+ if (isLogicalCam) {
+ EXPECT_TRUE(!status.isOk() && status.getServiceSpecificError() ==
+ static_cast<int>(EvsResult::NOT_SUPPORTED));
+ } else {
+ EXPECT_TRUE(status.isOk());
+ }
+
+ // Explicitly close the camera so resources are released right away
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ }
+ }
+}
+
+/*
+ * CameraOpenAggressive:
+ * Opens each camera reported by the enumerator twice in a row without an intervening closeCamera
+ * call. This ensures that the intended "aggressive open" behavior works. This is necessary for
+ * the system to be tolerant of shutdown/restart race conditions.
+ */
+TEST_P(EvsAidlTest, CameraOpenAggressive) {
+ LOG(INFO) << "Starting CameraOpenAggressive test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Open and close each camera twice
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (mIsHwModule && isLogicalCam) {
+ LOG(INFO) << "Skip a logical device, " << cam.id << " for HW target.";
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ mActiveCameras.clear();
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Verify that this camera self-identifies correctly
+ CameraDesc cameraInfo;
+ ASSERT_TRUE(pCam->getCameraInfo(&cameraInfo).isOk());
+ EXPECT_EQ(cam.id, cameraInfo.id);
+
+ std::shared_ptr<IEvsCamera> pCam2;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam2).isOk());
+ EXPECT_NE(pCam2, nullptr);
+ EXPECT_NE(pCam, pCam2);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam2);
+
+ auto status = pCam->setMaxFramesInFlight(2);
+ if (mIsHwModule) {
+ // Verify that the old camera rejects calls via HW module.
+ EXPECT_TRUE(!status.isOk() && status.getServiceSpecificError() ==
+ static_cast<int>(EvsResult::OWNERSHIP_LOST));
+ } else {
+            // The default implementation supports multiple clients.
+ EXPECT_TRUE(status.isOk());
+ }
+
+ // Close the superseded camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.pop_front();
+
+ // Verify that the second camera instance self-identifies correctly
+ ASSERT_TRUE(pCam2->getCameraInfo(&cameraInfo).isOk());
+ EXPECT_EQ(cam.id, cameraInfo.id);
+
+ // Close the second camera instance
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam2).isOk());
+ mActiveCameras.pop_front();
+ }
+
+    // Sleep here to ensure the destructor cleanup has time to run so we don't break follow-on tests
+ sleep(1); // I hate that this is an arbitrary time to wait. :( b/36122635
+}
+
+/*
+ * CameraStreamPerformance:
+ * Measure and qualify the stream start up time and streaming frame rate of each reported camera
+ */
+TEST_P(EvsAidlTest, CameraStreamPerformance) {
+ LOG(INFO) << "Starting CameraStreamPerformance test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ auto devices = getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (mIsHwModule && isLogicalCam) {
+ LOG(INFO) << "Skip a logical device " << cam.id;
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Set up a frame receiver object which will fire up its own thread
+ std::shared_ptr<FrameHandler> frameHandler =
+ std::make_shared<FrameHandler>(pCam, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Start the camera's video stream
+ nsecs_t start = systemTime(SYSTEM_TIME_MONOTONIC);
+ ASSERT_TRUE(frameHandler->startStream());
+
+ // Ensure the first frame arrived within the expected time
+ frameHandler->waitForFrameCount(1);
+ nsecs_t firstFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+ nsecs_t timeToFirstFrame = systemTime(SYSTEM_TIME_MONOTONIC) - start;
+
+        // Extra delay is expected when we attempt to start a video stream on a
+        // logical camera device; the total delay should be at most
+        // kMaxStreamStartMilliseconds multiplied by the number of physical
+        // camera devices.
+ EXPECT_LE(nanoseconds_to_milliseconds(timeToFirstFrame),
+ kMaxStreamStartMilliseconds * devices.size());
+ printf("%s: Measured time to first frame %0.2f ms\n", cam.id.data(),
+ timeToFirstFrame * kNanoToMilliseconds);
+ LOG(INFO) << cam.id << ": Measured time to first frame " << std::scientific
+ << timeToFirstFrame * kNanoToMilliseconds << " ms.";
+
+ // Check aspect ratio
+ unsigned width = 0, height = 0;
+ frameHandler->getFrameDimension(&width, &height);
+ EXPECT_GE(width, height);
+
+ // Wait a bit, then ensure we get at least the required minimum number of frames
+ sleep(5);
+ nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ // Even when the camera pointer goes out of scope, the FrameHandler object will
+ // keep the stream alive unless we tell it to shutdown.
+        // Also note that the FrameHandler and the Camera have a mutual circular reference, so
+ // we have to break that cycle in order for either of them to get cleaned up.
+ frameHandler->shutdown();
+
+ unsigned framesReceived = 0;
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ framesReceived = framesReceived - 1; // Back out the first frame we already waited for
+ nsecs_t runTime = end - firstFrame;
+ float framesPerSecond = framesReceived / (runTime * kNanoToSeconds);
+ printf("Measured camera rate %3.2f fps\n", framesPerSecond);
+ LOG(INFO) << "Measured camera rate " << std::scientific << framesPerSecond << " fps.";
+ EXPECT_GE(framesPerSecond, kMinimumFramesPerSecond);
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ }
+}
+
+/*
+ * CameraStreamBuffering:
+ * Ensure the camera implementation behaves properly when the client holds onto buffers for more
+ * than one frame time. The camera must cleanly skip frames until the client is ready again.
+ */
+TEST_P(EvsAidlTest, CameraStreamBuffering) {
+ LOG(INFO) << "Starting CameraStreamBuffering test";
+
+ // Arbitrary constant (should be > 1 and not too big)
+ static const unsigned int kBuffersToHold = 6;
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (mIsHwModule && isLogicalCam) {
+ LOG(INFO) << "Skip a logical device " << cam.id << " for HW target.";
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Ask for a very large number of buffers in flight to ensure it errors correctly
+ auto badResult = pCam->setMaxFramesInFlight(0xFFFFFFFF);
+ EXPECT_TRUE(!badResult.isOk() && badResult.getServiceSpecificError() ==
+ static_cast<int>(EvsResult::BUFFER_NOT_AVAILABLE));
+
+        // Now ask for exactly kBuffersToHold buffers in flight, as we'll test behavior at that limit
+ ASSERT_TRUE(pCam->setMaxFramesInFlight(kBuffersToHold).isOk());
+
+ // Set up a frame receiver object which will fire up its own thread.
+ std::shared_ptr<FrameHandler> frameHandler =
+ std::make_shared<FrameHandler>(pCam, cam, nullptr, FrameHandler::eNoAutoReturn);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Start the camera's video stream
+ ASSERT_TRUE(frameHandler->startStream());
+
+ // Check that the video stream stalls once we've gotten exactly the number of buffers
+ // we requested since we told the frameHandler not to return them.
+        sleep(1); // 1 second should be enough for the stream to fill all held buffers in the worst case
+ unsigned framesReceived = 0;
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ ASSERT_EQ(kBuffersToHold, framesReceived) << "Stream didn't stall at expected buffer limit";
+
+ // Give back one buffer
+ ASSERT_TRUE(frameHandler->returnHeldBuffer());
+
+ // Once we return a buffer, it shouldn't take more than 1/10 second to get a new one
+ // filled since we require 10fps minimum -- but give a 10% allowance just in case.
+ usleep(110 * kMillisecondsToMicroseconds);
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ EXPECT_EQ(kBuffersToHold + 1, framesReceived) << "Stream should've resumed";
+
+ // Even when the camera pointer goes out of scope, the FrameHandler object will
+ // keep the stream alive unless we tell it to shutdown.
+        // Also note that the FrameHandler and the Camera have a mutual circular reference, so
+ // we have to break that cycle in order for either of them to get cleaned up.
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ }
+}
+
+/*
+ * CameraToDisplayRoundTrip:
+ * End to end test of data flowing from the camera to the display. Each delivered frame of camera
+ * imagery is simply copied to the display buffer and presented on screen. This is the one test
+ * which a human could observe to see the operation of the system on the physical display.
+ */
+TEST_P(EvsAidlTest, CameraToDisplayRoundTrip) {
+ LOG(INFO) << "Starting CameraToDisplayRoundTrip test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Request available display IDs
+ uint8_t targetDisplayId = 0;
+ std::vector<uint8_t> displayIds;
+ ASSERT_TRUE(mEnumerator->getDisplayIdList(&displayIds).isOk());
+ EXPECT_GT(displayIds.size(), 0);
+ targetDisplayId = displayIds[0];
+
+ // Request exclusive access to the first EVS display
+ std::shared_ptr<IEvsDisplay> pDisplay;
+ ASSERT_TRUE(mEnumerator->openDisplay(targetDisplayId, &pDisplay).isOk());
+ EXPECT_NE(pDisplay, nullptr);
+ LOG(INFO) << "Display " << targetDisplayId << " is in use.";
+
+ // Get the display descriptor
+ DisplayDesc displayDesc;
+ ASSERT_TRUE(pDisplay->getDisplayInfo(&displayDesc).isOk());
+ LOG(INFO) << " Resolution: " << displayDesc.width << "x" << displayDesc.height;
+ ASSERT_GT(displayDesc.width, 0);
+ ASSERT_GT(displayDesc.height, 0);
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (mIsHwModule && isLogicalCam) {
+ LOG(INFO) << "Skip a logical device " << cam.id << " for HW target.";
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Set up a frame receiver object which will fire up its own thread.
+ std::shared_ptr<FrameHandler> frameHandler =
+ std::make_shared<FrameHandler>(pCam, cam, pDisplay, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Activate the display
+ ASSERT_TRUE(pDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME).isOk());
+
+ // Start the camera's video stream
+ ASSERT_TRUE(frameHandler->startStream());
+
+ // Wait a while to let the data flow
+ static const int kSecondsToWait = 5;
+ const int streamTimeMs =
+ kSecondsToWait * kSecondsToMilliseconds - kMaxStreamStartMilliseconds;
+ const unsigned minimumFramesExpected =
+ streamTimeMs * kMinimumFramesPerSecond / kSecondsToMilliseconds;
+ sleep(kSecondsToWait);
+ unsigned framesReceived = 0;
+ unsigned framesDisplayed = 0;
+ frameHandler->getFramesCounters(&framesReceived, &framesDisplayed);
+ EXPECT_EQ(framesReceived, framesDisplayed);
+ EXPECT_GE(framesDisplayed, minimumFramesExpected);
+
+ // Turn off the display (yes, before the stream stops -- it should be handled)
+ ASSERT_TRUE(pDisplay->setDisplayState(DisplayState::NOT_VISIBLE).isOk());
+
+ // Shut down the streamer
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ }
+
+ // Explicitly release the display
+ ASSERT_TRUE(mEnumerator->closeDisplay(pDisplay).isOk());
+}
+
+/*
+ * MultiCameraStream:
+ * Verify that each client can start and stop video streams on the same
+ * underlying camera.
+ */
+TEST_P(EvsAidlTest, MultiCameraStream) {
+ LOG(INFO) << "Starting MultiCameraStream test";
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ // Create two camera clients.
+ std::shared_ptr<IEvsCamera> pCam0;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam0).isOk());
+ EXPECT_NE(pCam0, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam0);
+
+ std::shared_ptr<IEvsCamera> pCam1;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam1).isOk());
+ EXPECT_NE(pCam1, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam1);
+
+        // Set up per-client frame receiver objects, each of which will fire up its own thread
+ std::shared_ptr<FrameHandler> frameHandler0 =
+ std::make_shared<FrameHandler>(pCam0, cam, nullptr, FrameHandler::eAutoReturn);
+ std::shared_ptr<FrameHandler> frameHandler1 =
+ std::make_shared<FrameHandler>(pCam1, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler0, nullptr);
+ EXPECT_NE(frameHandler1, nullptr);
+
+ // Start the camera's video stream via client 0
+ ASSERT_TRUE(frameHandler0->startStream());
+ ASSERT_TRUE(frameHandler1->startStream());
+
+ // Ensure the stream starts
+ frameHandler0->waitForFrameCount(1);
+ frameHandler1->waitForFrameCount(1);
+
+ nsecs_t firstFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ // Wait a bit, then ensure both clients get at least the required minimum number of frames
+ sleep(5);
+ nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
+ unsigned framesReceived0 = 0, framesReceived1 = 0;
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+ framesReceived0 = framesReceived0 - 1; // Back out the first frame we already waited for
+ framesReceived1 = framesReceived1 - 1; // Back out the first frame we already waited for
+ nsecs_t runTime = end - firstFrame;
+ float framesPerSecond0 = framesReceived0 / (runTime * kNanoToSeconds);
+ float framesPerSecond1 = framesReceived1 / (runTime * kNanoToSeconds);
+ LOG(INFO) << "Measured camera rate " << std::scientific << framesPerSecond0 << " fps and "
+ << framesPerSecond1 << " fps";
+ EXPECT_GE(framesPerSecond0, kMinimumFramesPerSecond);
+ EXPECT_GE(framesPerSecond1, kMinimumFramesPerSecond);
+
+ // Shutdown one client
+ frameHandler0->shutdown();
+
+ // Read frame counters again
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+
+ // Wait a bit again
+ sleep(5);
+ unsigned framesReceivedAfterStop0 = 0, framesReceivedAfterStop1 = 0;
+ frameHandler0->getFramesCounters(&framesReceivedAfterStop0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceivedAfterStop1, nullptr);
+ EXPECT_EQ(framesReceived0, framesReceivedAfterStop0);
+ EXPECT_LT(framesReceived1, framesReceivedAfterStop1);
+
+        // Shut down the other client
+ frameHandler1->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam0).isOk());
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam1).isOk());
+ mActiveCameras.clear();
+
+        // TODO(b/145459970, b/145457727): the sleep() below is added to ensure
+        // the destruction of active camera objects; this may be related to these
+        // two issues.
+ sleep(1);
+ }
+}
+
+/*
+ * CameraParameter:
+ * Verify that a client can adjust a camera parameter.
+ */
+TEST_P(EvsAidlTest, CameraParameter) {
+ LOG(INFO) << "Starting CameraParameter test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (isLogicalCam) {
+ // TODO(b/145465724): Support camera parameter programming on
+ // logical devices.
+ LOG(INFO) << "Skip a logical device " << cam.id;
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ // Create a camera client
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera
+ mActiveCameras.push_back(pCam);
+
+ // Get the parameter list
+ std::vector<CameraParam> cmds;
+ ASSERT_TRUE(pCam->getParameterList(&cmds).isOk());
+ if (cmds.size() < 1) {
+ continue;
+ }
+
+        // Set up a frame receiver object which will fire up its own thread
+ std::shared_ptr<FrameHandler> frameHandler =
+ std::make_shared<FrameHandler>(pCam, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Start the camera's video stream
+ ASSERT_TRUE(frameHandler->startStream());
+
+ // Ensure the stream starts
+ frameHandler->waitForFrameCount(1);
+
+        // Make the current client the primary client
+ ASSERT_TRUE(pCam->setPrimaryClient().isOk());
+ for (auto& cmd : cmds) {
+ // Get a valid parameter value range
+ ParameterRange range;
+ ASSERT_TRUE(pCam->getIntParameterRange(cmd, &range).isOk());
+
+ std::vector<int32_t> values;
+ if (cmd == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ ASSERT_TRUE(pCam->setIntParameter(CameraParam::AUTO_FOCUS, 0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(v, 0);
+ }
+ }
+
+            // Try to program the parameter with a random value in
+            // [range.min, range.max), rounded down to the nearest step.
+            int32_t val0 = range.min + (std::rand() % (range.max - range.min));
+            val0 = val0 - (val0 % range.step);
+ values.clear();
+ ASSERT_TRUE(pCam->setIntParameter(cmd, val0, &values).isOk());
+
+ values.clear();
+ ASSERT_TRUE(pCam->getIntParameter(cmd, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(val0, v) << "Values are not matched.";
+ }
+ }
+ ASSERT_TRUE(pCam->unsetPrimaryClient().isOk());
+
+ // Shutdown
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ }
+}
+
+/*
+ * CameraPrimaryClientRelease:
+ * Verify that the non-primary client gets notified when the primary client either
+ * terminates or releases its role.
+ */
+TEST_P(EvsAidlTest, CameraPrimaryClientRelease) {
+ LOG(INFO) << "Starting CameraPrimaryClientRelease test";
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (isLogicalCam) {
+ // TODO(b/145465724): Support camera parameter programming on
+ // logical devices.
+ LOG(INFO) << "Skip a logical device " << cam.id;
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ // Create two camera clients.
+ std::shared_ptr<IEvsCamera> pPrimaryCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pPrimaryCam).isOk());
+ EXPECT_NE(pPrimaryCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pPrimaryCam);
+
+ std::shared_ptr<IEvsCamera> pSecondaryCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pSecondaryCam).isOk());
+ EXPECT_NE(pSecondaryCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pSecondaryCam);
+
+        // Set up per-client frame receiver objects, each of which will fire up its own thread
+ std::shared_ptr<FrameHandler> frameHandlerPrimary = std::make_shared<FrameHandler>(
+ pPrimaryCam, cam, nullptr, FrameHandler::eAutoReturn);
+ std::shared_ptr<FrameHandler> frameHandlerSecondary = std::make_shared<FrameHandler>(
+ pSecondaryCam, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandlerPrimary, nullptr);
+ EXPECT_NE(frameHandlerSecondary, nullptr);
+
+ // Set one client as the primary client
+ ASSERT_TRUE(pPrimaryCam->setPrimaryClient().isOk());
+
+ // Try to set another client as the primary client.
+ ASSERT_FALSE(pSecondaryCam->setPrimaryClient().isOk());
+
+        // Start the camera's video stream via the primary client.
+ ASSERT_TRUE(frameHandlerPrimary->startStream());
+
+ // Ensure the stream starts
+ frameHandlerPrimary->waitForFrameCount(1);
+
+ // Start the camera's video stream via another client
+ ASSERT_TRUE(frameHandlerSecondary->startStream());
+
+ // Ensure the stream starts
+ frameHandlerSecondary->waitForFrameCount(1);
+
+        // The non-primary client expects to receive a notification when the
+        // primary client role is released.
+ EvsEventDesc aTargetEvent = {};
+ EvsEventDesc aNotification = {};
+
+ bool listening = false;
+ std::mutex eventLock;
+ std::condition_variable eventCond;
+ std::thread listener =
+ std::thread([&aNotification, &frameHandlerSecondary, &listening, &eventCond]() {
+ // Notify that a listening thread is running.
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::MASTER_RELEASED;
+ if (!frameHandlerSecondary->waitForEvent(aTargetEvent, aNotification, true)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+ // Wait until a listening thread starts.
+ std::unique_lock<std::mutex> lock(eventLock);
+ auto timer = std::chrono::system_clock::now();
+ while (!listening) {
+ timer += 1s;
+ eventCond.wait_until(lock, timer);
+ }
+ lock.unlock();
+
+ // Release a primary client role.
+ ASSERT_TRUE(pPrimaryCam->unsetPrimaryClient().isOk());
+
+ // Join a listening thread.
+ if (listener.joinable()) {
+ listener.join();
+ }
+
+ // Verify change notifications.
+ ASSERT_EQ(EvsEventType::MASTER_RELEASED, static_cast<EvsEventType>(aNotification.aType));
+
+ // Non-primary becomes a primary client.
+ ASSERT_TRUE(pSecondaryCam->setPrimaryClient().isOk());
+
+ // Previous primary client fails to become a primary client.
+ ASSERT_FALSE(pPrimaryCam->setPrimaryClient().isOk());
+
+ listening = false;
+ listener = std::thread([&aNotification, &frameHandlerPrimary, &listening, &eventCond]() {
+ // Notify that a listening thread is running.
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::MASTER_RELEASED;
+ if (!frameHandlerPrimary->waitForEvent(aTargetEvent, aNotification, true)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+ // Wait until a listening thread starts.
+ timer = std::chrono::system_clock::now();
+ lock.lock();
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ // Closing current primary client.
+ frameHandlerSecondary->shutdown();
+
+ // Join a listening thread.
+ if (listener.joinable()) {
+ listener.join();
+ }
+
+ // Verify change notifications.
+ ASSERT_EQ(EvsEventType::MASTER_RELEASED, static_cast<EvsEventType>(aNotification.aType));
+
+ // Closing streams.
+ frameHandlerPrimary->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pPrimaryCam).isOk());
+ ASSERT_TRUE(mEnumerator->closeCamera(pSecondaryCam).isOk());
+ mActiveCameras.clear();
+ }
+}
+
+/*
+ * MultiCameraParameter:
+ * Verify that primary and non-primary clients behave as expected when they try to adjust
+ * camera parameters.
+ */
+TEST_P(EvsAidlTest, MultiCameraParameter) {
+ LOG(INFO) << "Starting MultiCameraParameter test";
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (isLogicalCam) {
+ // TODO(b/145465724): Support camera parameter programming on
+ // logical devices.
+ LOG(INFO) << "Skip a logical device " << cam.id;
+ continue;
+ }
+
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ // Create two camera clients.
+ std::shared_ptr<IEvsCamera> pPrimaryCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pPrimaryCam).isOk());
+ EXPECT_NE(pPrimaryCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pPrimaryCam);
+
+ std::shared_ptr<IEvsCamera> pSecondaryCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pSecondaryCam).isOk());
+ EXPECT_NE(pSecondaryCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pSecondaryCam);
+
+ // Get the parameter list
+ std::vector<CameraParam> camPrimaryCmds, camSecondaryCmds;
+ ASSERT_TRUE(pPrimaryCam->getParameterList(&camPrimaryCmds).isOk());
+ ASSERT_TRUE(pSecondaryCam->getParameterList(&camSecondaryCmds).isOk());
+ if (camPrimaryCmds.size() < 1 || camSecondaryCmds.size() < 1) {
+ // Skip a camera device if it does not support any parameter.
+ continue;
+ }
+
+        // Set up per-client frame receiver objects, each of which will fire up its own thread
+ std::shared_ptr<FrameHandler> frameHandlerPrimary = std::make_shared<FrameHandler>(
+ pPrimaryCam, cam, nullptr, FrameHandler::eAutoReturn);
+ std::shared_ptr<FrameHandler> frameHandlerSecondary = std::make_shared<FrameHandler>(
+ pSecondaryCam, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandlerPrimary, nullptr);
+ EXPECT_NE(frameHandlerSecondary, nullptr);
+
+ // Set one client as the primary client.
+ ASSERT_TRUE(pPrimaryCam->setPrimaryClient().isOk());
+
+ // Try to set another client as the primary client.
+ ASSERT_FALSE(pSecondaryCam->setPrimaryClient().isOk());
+
+        // Start the camera's video stream via the primary client.
+ ASSERT_TRUE(frameHandlerPrimary->startStream());
+
+ // Ensure the stream starts
+ frameHandlerPrimary->waitForFrameCount(1);
+
+ // Start the camera's video stream via another client
+ ASSERT_TRUE(frameHandlerSecondary->startStream());
+
+ // Ensure the stream starts
+ frameHandlerSecondary->waitForFrameCount(1);
+
+ int32_t val0 = 0;
+ std::vector<int32_t> values;
+ EvsEventDesc aNotification0 = {};
+ EvsEventDesc aNotification1 = {};
+ for (auto& cmd : camPrimaryCmds) {
+ // Get a valid parameter value range
+ ParameterRange range;
+ ASSERT_TRUE(pPrimaryCam->getIntParameterRange(cmd, &range).isOk());
+ if (cmd == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ values.clear();
+ ASSERT_TRUE(
+ pPrimaryCam->setIntParameter(CameraParam::AUTO_FOCUS, 0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(v, 0);
+ }
+ }
+
+ // Calculate a parameter value to program.
+ val0 = range.min + (std::rand() % (range.max - range.min));
+ val0 = val0 - (val0 % range.step);
+
+ // Prepare and start event listeners.
+ bool listening0 = false;
+ bool listening1 = false;
+ std::condition_variable eventCond;
+ std::thread listener0 = std::thread([cmd, val0, &aNotification0, &frameHandlerPrimary,
+ &listening0, &listening1, &eventCond]() {
+ listening0 = true;
+ if (listening1) {
+ eventCond.notify_all();
+ }
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(cmd);
+ aTargetEvent.payload[1] = val0;
+ if (!frameHandlerPrimary->waitForEvent(aTargetEvent, aNotification0)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+ std::thread listener1 = std::thread([cmd, val0, &aNotification1, &frameHandlerSecondary,
+ &listening0, &listening1, &eventCond]() {
+ listening1 = true;
+ if (listening0) {
+ eventCond.notify_all();
+ }
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(cmd);
+ aTargetEvent.payload[1] = val0;
+ if (!frameHandlerSecondary->waitForEvent(aTargetEvent, aNotification1)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+ // Wait until a listening thread starts.
+ std::mutex eventLock;
+ std::unique_lock<std::mutex> lock(eventLock);
+ auto timer = std::chrono::system_clock::now();
+ while (!listening0 || !listening1) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ // Try to program a parameter
+ values.clear();
+ ASSERT_TRUE(pPrimaryCam->setIntParameter(cmd, val0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(val0, v) << "Values are not matched.";
+ }
+
+ // Join a listening thread.
+ if (listener0.joinable()) {
+ listener0.join();
+ }
+ if (listener1.joinable()) {
+ listener1.join();
+ }
+
+ // Verify a change notification
+ ASSERT_EQ(EvsEventType::PARAMETER_CHANGED,
+ static_cast<EvsEventType>(aNotification0.aType));
+ ASSERT_EQ(EvsEventType::PARAMETER_CHANGED,
+ static_cast<EvsEventType>(aNotification1.aType));
+ ASSERT_EQ(cmd, static_cast<CameraParam>(aNotification0.payload[0]));
+ ASSERT_EQ(cmd, static_cast<CameraParam>(aNotification1.payload[0]));
+ for (auto&& v : values) {
+ ASSERT_EQ(v, static_cast<int32_t>(aNotification0.payload[1]));
+ ASSERT_EQ(v, static_cast<int32_t>(aNotification1.payload[1]));
+ }
+
+            // Clients expect to receive a parameter change notification
+            // whenever the primary client adjusts a parameter.
+ values.clear();
+ ASSERT_TRUE(pPrimaryCam->getIntParameter(cmd, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(val0, v) << "Values are not matched.";
+ }
+ }
+
+ // Try to adjust a parameter via non-primary client
+ values.clear();
+ ASSERT_FALSE(pSecondaryCam->setIntParameter(camSecondaryCmds[0], val0, &values).isOk());
+
+ // Non-primary client attempts to be a primary client
+ ASSERT_FALSE(pSecondaryCam->setPrimaryClient().isOk());
+
+ // Primary client retires from a primary client role
+ bool listening = false;
+ std::condition_variable eventCond;
+ std::thread listener =
+ std::thread([&aNotification0, &frameHandlerSecondary, &listening, &eventCond]() {
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::MASTER_RELEASED;
+ if (!frameHandlerSecondary->waitForEvent(aTargetEvent, aNotification0, true)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+ std::mutex eventLock;
+ auto timer = std::chrono::system_clock::now();
+ std::unique_lock<std::mutex> lock(eventLock);
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ ASSERT_TRUE(pPrimaryCam->unsetPrimaryClient().isOk());
+
+ if (listener.joinable()) {
+ listener.join();
+ }
+ ASSERT_EQ(EvsEventType::MASTER_RELEASED, static_cast<EvsEventType>(aNotification0.aType));
+
+ // Try to adjust a parameter after being retired
+ values.clear();
+ ASSERT_FALSE(pPrimaryCam->setIntParameter(camPrimaryCmds[0], val0, &values).isOk());
+
+ // Non-primary client becomes a primary client
+ ASSERT_TRUE(pSecondaryCam->setPrimaryClient().isOk());
+
+ // Try to adjust a parameter via new primary client
+ for (auto& cmd : camSecondaryCmds) {
+ // Get a valid parameter value range
+ ParameterRange range;
+ ASSERT_TRUE(pSecondaryCam->getIntParameterRange(cmd, &range).isOk());
+
+ values.clear();
+ if (cmd == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ values.clear();
+ ASSERT_TRUE(
+ pSecondaryCam->setIntParameter(CameraParam::AUTO_FOCUS, 0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(v, 0);
+ }
+ }
+
+            // Calculate a parameter value to program, rounded down to the nearest step.
+ val0 = range.min + (std::rand() % (range.max - range.min));
+ val0 = val0 - (val0 % range.step);
+
+ // Prepare and start event listeners.
+ bool listening0 = false;
+ bool listening1 = false;
+ std::condition_variable eventCond;
+ std::thread listener0 = std::thread([&]() {
+ listening0 = true;
+ if (listening1) {
+ eventCond.notify_all();
+ }
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(cmd);
+ aTargetEvent.payload[1] = val0;
+ if (!frameHandlerPrimary->waitForEvent(aTargetEvent, aNotification0)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+ std::thread listener1 = std::thread([&]() {
+ listening1 = true;
+ if (listening0) {
+ eventCond.notify_all();
+ }
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(cmd);
+ aTargetEvent.payload[1] = val0;
+ if (!frameHandlerSecondary->waitForEvent(aTargetEvent, aNotification1)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+ // Wait until a listening thread starts.
+ std::mutex eventLock;
+ std::unique_lock<std::mutex> lock(eventLock);
+ auto timer = std::chrono::system_clock::now();
+ while (!listening0 || !listening1) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ // Try to program a parameter
+ values.clear();
+ ASSERT_TRUE(pSecondaryCam->setIntParameter(cmd, val0, &values).isOk());
+
+            // Clients expect to receive a parameter change notification
+            // whenever the primary client adjusts a parameter.
+ values.clear();
+ ASSERT_TRUE(pSecondaryCam->getIntParameter(cmd, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(val0, v) << "Values are not matched.";
+ }
+
+ // Join a listening thread.
+ if (listener0.joinable()) {
+ listener0.join();
+ }
+ if (listener1.joinable()) {
+ listener1.join();
+ }
+
+ // Verify a change notification
+ ASSERT_EQ(EvsEventType::PARAMETER_CHANGED,
+ static_cast<EvsEventType>(aNotification0.aType));
+ ASSERT_EQ(EvsEventType::PARAMETER_CHANGED,
+ static_cast<EvsEventType>(aNotification1.aType));
+ ASSERT_EQ(cmd, static_cast<CameraParam>(aNotification0.payload[0]));
+ ASSERT_EQ(cmd, static_cast<CameraParam>(aNotification1.payload[0]));
+ for (auto&& v : values) {
+ ASSERT_EQ(v, static_cast<int32_t>(aNotification0.payload[1]));
+ ASSERT_EQ(v, static_cast<int32_t>(aNotification1.payload[1]));
+ }
+ }
+
+ // New primary client retires from the role
+ ASSERT_TRUE(pSecondaryCam->unsetPrimaryClient().isOk());
+
+ // Shutdown
+ frameHandlerPrimary->shutdown();
+ frameHandlerSecondary->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pPrimaryCam).isOk());
+ ASSERT_TRUE(mEnumerator->closeCamera(pSecondaryCam).isOk());
+ mActiveCameras.clear();
+ }
+}
+
+/*
+ * HighPriorityCameraClient:
+ * The EVS client that owns the display is prioritized and can therefore take over
+ * the primary client role from other EVS clients that do not own the display.
+ */
+TEST_P(EvsAidlTest, HighPriorityCameraClient) {
+ LOG(INFO) << "Starting HighPriorityCameraClient test";
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Request available display IDs
+ uint8_t targetDisplayId = 0;
+ std::vector<uint8_t> displayIds;
+ ASSERT_TRUE(mEnumerator->getDisplayIdList(&displayIds).isOk());
+ EXPECT_GT(displayIds.size(), 0);
+ targetDisplayId = displayIds[0];
+
+ // Request exclusive access to the EVS display
+ std::shared_ptr<IEvsDisplay> pDisplay;
+ ASSERT_TRUE(mEnumerator->openDisplay(targetDisplayId, &pDisplay).isOk());
+ EXPECT_NE(pDisplay, nullptr);
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ // Create two clients
+ std::shared_ptr<IEvsCamera> pCam0;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam0).isOk());
+ EXPECT_NE(pCam0, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam0);
+
+ std::shared_ptr<IEvsCamera> pCam1;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam1).isOk());
+ EXPECT_NE(pCam1, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam1);
+
+ // Get the parameter list; this test will use the first command in both
+ // lists.
+ std::vector<CameraParam> cam0Cmds, cam1Cmds;
+ ASSERT_TRUE(pCam0->getParameterList(&cam0Cmds).isOk());
+ ASSERT_TRUE(pCam1->getParameterList(&cam1Cmds).isOk());
+ if (cam0Cmds.size() < 1 || cam1Cmds.size() < 1) {
+ // Cannot execute this test.
+ return;
+ }
+
+        // Set up frame receiver objects, each of which will fire up its own thread.
+ std::shared_ptr<FrameHandler> frameHandler0 =
+ std::make_shared<FrameHandler>(pCam0, cam, nullptr, FrameHandler::eAutoReturn);
+ std::shared_ptr<FrameHandler> frameHandler1 =
+ std::make_shared<FrameHandler>(pCam1, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler0, nullptr);
+ EXPECT_NE(frameHandler1, nullptr);
+
+ // Activate the display
+ ASSERT_TRUE(pDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME).isOk());
+
+ // Start the camera's video stream
+ ASSERT_TRUE(frameHandler0->startStream());
+ ASSERT_TRUE(frameHandler1->startStream());
+
+ // Ensure the stream starts
+ frameHandler0->waitForFrameCount(1);
+ frameHandler1->waitForFrameCount(1);
+
+ // Client 1 becomes a primary client and programs a parameter.
+
+ // Get a valid parameter value range
+ ParameterRange range;
+ ASSERT_TRUE(pCam1->getIntParameterRange(cam1Cmds[0], &range).isOk());
+
+ // Client1 becomes a primary client
+ ASSERT_TRUE(pCam1->setPrimaryClient().isOk());
+
+ std::vector<int32_t> values;
+ EvsEventDesc aTargetEvent = {};
+ EvsEventDesc aNotification = {};
+ bool listening = false;
+ std::mutex eventLock;
+ std::condition_variable eventCond;
+ if (cam1Cmds[0] == CameraParam::ABSOLUTE_FOCUS) {
+ std::thread listener =
+ std::thread([&frameHandler0, &aNotification, &listening, &eventCond] {
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(CameraParam::AUTO_FOCUS);
+ aTargetEvent.payload[1] = 0;
+ if (!frameHandler0->waitForEvent(aTargetEvent, aNotification)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+            // Wait until the listener starts.
+ std::unique_lock<std::mutex> lock(eventLock);
+ auto timer = std::chrono::system_clock::now();
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ // Try to turn off auto-focus
+ ASSERT_TRUE(pCam1->setIntParameter(CameraParam::AUTO_FOCUS, 0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(v, 0);
+ }
+
+ // Join a listener
+ if (listener.joinable()) {
+ listener.join();
+ }
+
+ // Make sure AUTO_FOCUS is off.
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType),
+ EvsEventType::PARAMETER_CHANGED);
+ }
+
+ // Try to program a parameter with a random value [minVal, maxVal] after
+ // rounding it down.
+ int32_t val0 = range.min + (std::rand() % (range.max - range.min));
+ val0 = val0 - (val0 % range.step);
+
+ std::thread listener = std::thread(
+ [&frameHandler1, &aNotification, &listening, &eventCond, &cam1Cmds, val0] {
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(cam1Cmds[0]);
+ aTargetEvent.payload[1] = val0;
+ if (!frameHandler1->waitForEvent(aTargetEvent, aNotification)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+        // Wait until the listener starts.
+ listening = false;
+ std::unique_lock<std::mutex> lock(eventLock);
+ auto timer = std::chrono::system_clock::now();
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ values.clear();
+ ASSERT_TRUE(pCam1->setIntParameter(cam1Cmds[0], val0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(val0, v);
+ }
+
+ // Join a listener
+ if (listener.joinable()) {
+ listener.join();
+ }
+
+ // Verify a change notification
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType), EvsEventType::PARAMETER_CHANGED);
+ ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]), cam1Cmds[0]);
+ for (auto&& v : values) {
+ ASSERT_EQ(v, static_cast<int32_t>(aNotification.payload[1]));
+ }
+
+ listener = std::thread([&frameHandler1, &aNotification, &listening, &eventCond] {
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::MASTER_RELEASED;
+ if (!frameHandler1->waitForEvent(aTargetEvent, aNotification, true)) {
+ LOG(WARNING) << "A timer is expired before a target event is fired.";
+ }
+ });
+
+        // Wait until the listener starts.
+ listening = false;
+ lock.lock();
+ timer = std::chrono::system_clock::now();
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ // Client 0 steals a primary client role
+ ASSERT_TRUE(pCam0->forcePrimaryClient(pDisplay).isOk());
+
+ // Join a listener
+ if (listener.joinable()) {
+ listener.join();
+ }
+
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType), EvsEventType::MASTER_RELEASED);
+
+ // Client 0 programs a parameter
+ val0 = range.min + (std::rand() % (range.max - range.min));
+
+ // Rounding down
+ val0 = val0 - (val0 % range.step);
+
+ if (cam0Cmds[0] == CameraParam::ABSOLUTE_FOCUS) {
+ std::thread listener =
+ std::thread([&frameHandler1, &aNotification, &listening, &eventCond] {
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(CameraParam::AUTO_FOCUS);
+ aTargetEvent.payload[1] = 0;
+ if (!frameHandler1->waitForEvent(aTargetEvent, aNotification)) {
+ LOG(WARNING) << "A timer expired before the target event was fired.";
+ }
+ });
+
+ // Wait until a listener starts.
+ std::unique_lock<std::mutex> lock(eventLock);
+ auto timer = std::chrono::system_clock::now();
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ // Try to turn off auto-focus
+ values.clear();
+ ASSERT_TRUE(pCam0->setIntParameter(CameraParam::AUTO_FOCUS, 0, &values).isOk());
+ for (auto&& v : values) {
+ EXPECT_EQ(v, 0);
+ }
+
+ // Join a listener
+ if (listener.joinable()) {
+ listener.join();
+ }
+
+ // Make sure AUTO_FOCUS is off.
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType),
+ EvsEventType::PARAMETER_CHANGED);
+ }
+
+ listener = std::thread(
+ [&frameHandler0, &aNotification, &listening, &eventCond, &cam0Cmds, val0] {
+ listening = true;
+ eventCond.notify_all();
+
+ EvsEventDesc aTargetEvent;
+ aTargetEvent.aType = EvsEventType::PARAMETER_CHANGED;
+ aTargetEvent.payload[0] = static_cast<uint32_t>(cam0Cmds[0]);
+ aTargetEvent.payload[1] = val0;
+ if (!frameHandler0->waitForEvent(aTargetEvent, aNotification)) {
+ LOG(WARNING) << "A timer expired before the target event was fired.";
+ }
+ });
+
+ // Wait until a listener starts.
+ listening = false;
+ timer = std::chrono::system_clock::now();
+ lock.lock();
+ while (!listening) {
+ eventCond.wait_until(lock, timer + 1s);
+ }
+ lock.unlock();
+
+ values.clear();
+ ASSERT_TRUE(pCam0->setIntParameter(cam0Cmds[0], val0, &values).isOk());
+
+ // Join a listener
+ if (listener.joinable()) {
+ listener.join();
+ }
+ // Verify a change notification
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType), EvsEventType::PARAMETER_CHANGED);
+ ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]), cam0Cmds[0]);
+ for (auto&& v : values) {
+ ASSERT_EQ(v, static_cast<int32_t>(aNotification.payload[1]));
+ }
+
+ // Turn off the display (yes, before the stream stops -- it should be handled)
+ ASSERT_TRUE(pDisplay->setDisplayState(DisplayState::NOT_VISIBLE).isOk());
+
+ // Shut down the streamer
+ frameHandler0->shutdown();
+ frameHandler1->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam0).isOk());
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam1).isOk());
+ mActiveCameras.clear();
+ }
+
+ // Explicitly release the display
+ ASSERT_TRUE(mEnumerator->closeDisplay(pDisplay).isOk());
+}
+
+/*
+ * CameraUseStreamConfigToDisplay:
+ * End to end test of data flowing from the camera to the display. Similar to
+ * CameraToDisplayRoundTrip test case but this case retrieves available stream
+ * configurations from EVS and uses one of them to start a video stream.
+ */
+TEST_P(EvsAidlTest, CameraUseStreamConfigToDisplay) {
+ LOG(INFO) << "Starting CameraUseStreamConfigToDisplay test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Request available display IDs
+ uint8_t targetDisplayId = 0;
+ std::vector<uint8_t> displayIds;
+ ASSERT_TRUE(mEnumerator->getDisplayIdList(&displayIds).isOk());
+ EXPECT_GT(displayIds.size(), 0);
+ targetDisplayId = displayIds[0];
+
+ // Request exclusive access to the EVS display
+ std::shared_ptr<IEvsDisplay> pDisplay;
+ ASSERT_TRUE(mEnumerator->openDisplay(targetDisplayId, &pDisplay).isOk());
+ EXPECT_NE(pDisplay, nullptr);
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ // Choose a configuration whose frame rate is at least minReqFps.
+ Stream targetCfg = {};
+ const int32_t minReqFps = 15;
+ int32_t maxArea = 0;
+ camera_metadata_entry_t streamCfgs;
+ bool foundCfg = false;
+ if (!find_camera_metadata_entry(reinterpret_cast<camera_metadata_t*>(cam.metadata.data()),
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ &streamCfgs)) {
+ // Stream configurations are found in metadata
+ RawStreamConfig* ptr = reinterpret_cast<RawStreamConfig*>(streamCfgs.data.i32);
+ for (unsigned offset = 0; offset < streamCfgs.count; offset += kStreamCfgSz) {
+ if (ptr->direction == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ ptr->format == HAL_PIXEL_FORMAT_RGBA_8888) {
+ if (ptr->width * ptr->height > maxArea && ptr->framerate >= minReqFps) {
+ targetCfg.width = ptr->width;
+ targetCfg.height = ptr->height;
+
+ maxArea = ptr->width * ptr->height;
+ foundCfg = true;
+ }
+ }
+ ++ptr;
+ }
+ }
+ targetCfg.format = static_cast<PixelFormat>(HAL_PIXEL_FORMAT_RGBA_8888);
+
+ if (!foundCfg) {
+ // Current EVS camera does not provide stream configurations in the
+ // metadata.
+ continue;
+ }
+
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Set up a frame receiver object which will fire up its own thread.
+ std::shared_ptr<FrameHandler> frameHandler =
+ std::make_shared<FrameHandler>(pCam, cam, pDisplay, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Activate the display
+ ASSERT_TRUE(pDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME).isOk());
+
+ // Start the camera's video stream
+ ASSERT_TRUE(frameHandler->startStream());
+
+ // Wait a while to let the data flow
+ static const int kSecondsToWait = 5;
+ const int streamTimeMs =
+ kSecondsToWait * kSecondsToMilliseconds - kMaxStreamStartMilliseconds;
+ const unsigned minimumFramesExpected =
+ streamTimeMs * kMinimumFramesPerSecond / kSecondsToMilliseconds;
+ sleep(kSecondsToWait);
+ unsigned framesReceived = 0;
+ unsigned framesDisplayed = 0;
+ frameHandler->getFramesCounters(&framesReceived, &framesDisplayed);
+ EXPECT_EQ(framesReceived, framesDisplayed);
+ EXPECT_GE(framesDisplayed, minimumFramesExpected);
+
+ // Turn off the display (yes, before the stream stops -- it should be handled)
+ ASSERT_TRUE(pDisplay->setDisplayState(DisplayState::NOT_VISIBLE).isOk());
+
+ // Shut down the streamer
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ }
+
+ // Explicitly release the display
+ ASSERT_TRUE(mEnumerator->closeDisplay(pDisplay).isOk());
+}
+
+/*
+ * MultiCameraStreamUseConfig:
+ * Verify that each client can start and stop video streams on the same
+ * underlying camera with the same configuration.
+ */
+TEST_P(EvsAidlTest, MultiCameraStreamUseConfig) {
+ LOG(INFO) << "Starting MultiCameraStreamUseConfig test";
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ // Choose a configuration whose frame rate is at least minReqFps.
+ Stream targetCfg = {};
+ const int32_t minReqFps = 15;
+ int32_t maxArea = 0;
+ camera_metadata_entry_t streamCfgs;
+ bool foundCfg = false;
+ if (!find_camera_metadata_entry(reinterpret_cast<camera_metadata_t*>(cam.metadata.data()),
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ &streamCfgs)) {
+ // Stream configurations are found in metadata
+ RawStreamConfig* ptr = reinterpret_cast<RawStreamConfig*>(streamCfgs.data.i32);
+ for (unsigned offset = 0; offset < streamCfgs.count; offset += kStreamCfgSz) {
+ if (ptr->direction == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ ptr->format == HAL_PIXEL_FORMAT_RGBA_8888) {
+ if (ptr->width * ptr->height > maxArea && ptr->framerate >= minReqFps) {
+ targetCfg.width = ptr->width;
+ targetCfg.height = ptr->height;
+
+ maxArea = ptr->width * ptr->height;
+ foundCfg = true;
+ }
+ }
+ ++ptr;
+ }
+ }
+ targetCfg.format = static_cast<PixelFormat>(HAL_PIXEL_FORMAT_RGBA_8888);
+
+ if (!foundCfg) {
+ LOG(INFO) << "Device " << cam.id
+ << " does not provide a list of supported stream configurations, skipped";
+ continue;
+ }
+
+ // Create the first camera client with a selected stream configuration.
+ std::shared_ptr<IEvsCamera> pCam0;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam0).isOk());
+ EXPECT_NE(pCam0, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam0);
+
+ // Try to create the second camera client with different stream
+ // configuration.
+ int32_t id = targetCfg.id;
+ targetCfg.id += 1; // EVS manager sees only the stream id.
+ std::shared_ptr<IEvsCamera> pCam1;
+ ASSERT_FALSE(mEnumerator->openCamera(cam.id, targetCfg, &pCam1).isOk());
+
+ // Try again with same stream configuration.
+ targetCfg.id = id;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam1).isOk());
+ EXPECT_NE(pCam1, nullptr);
+
+ // Set up per-client frame receiver objects, each of which will fire up its own thread
+ std::shared_ptr<FrameHandler> frameHandler0 =
+ std::make_shared<FrameHandler>(pCam0, cam, nullptr, FrameHandler::eAutoReturn);
+ std::shared_ptr<FrameHandler> frameHandler1 =
+ std::make_shared<FrameHandler>(pCam1, cam, nullptr, FrameHandler::eAutoReturn);
+ EXPECT_NE(frameHandler0, nullptr);
+ EXPECT_NE(frameHandler1, nullptr);
+
+ // Start the camera's video stream via client 0
+ ASSERT_TRUE(frameHandler0->startStream());
+ ASSERT_TRUE(frameHandler1->startStream());
+
+ // Ensure the stream starts
+ frameHandler0->waitForFrameCount(1);
+ frameHandler1->waitForFrameCount(1);
+
+ nsecs_t firstFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ // Wait a bit, then ensure both clients get at least the required minimum number of frames
+ sleep(5);
+ nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
+ unsigned framesReceived0 = 0, framesReceived1 = 0;
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+ framesReceived0 = framesReceived0 - 1; // Back out the first frame we already waited for
+ framesReceived1 = framesReceived1 - 1; // Back out the first frame we already waited for
+ nsecs_t runTime = end - firstFrame;
+ float framesPerSecond0 = framesReceived0 / (runTime * kNanoToSeconds);
+ float framesPerSecond1 = framesReceived1 / (runTime * kNanoToSeconds);
+ LOG(INFO) << "Measured camera rate " << std::scientific << framesPerSecond0 << " fps and "
+ << framesPerSecond1 << " fps";
+ EXPECT_GE(framesPerSecond0, kMinimumFramesPerSecond);
+ EXPECT_GE(framesPerSecond1, kMinimumFramesPerSecond);
+
+ // Shutdown one client
+ frameHandler0->shutdown();
+
+ // Read frame counters again
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+
+ // Wait a bit again
+ sleep(5);
+ unsigned framesReceivedAfterStop0 = 0, framesReceivedAfterStop1 = 0;
+ frameHandler0->getFramesCounters(&framesReceivedAfterStop0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceivedAfterStop1, nullptr);
+ EXPECT_EQ(framesReceived0, framesReceivedAfterStop0);
+ EXPECT_LT(framesReceived1, framesReceivedAfterStop1);
+
+ // Shutdown another
+ frameHandler1->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam0).isOk());
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam1).isOk());
+ mActiveCameras.clear();
+ }
+}
+
+/*
+ * LogicalCameraMetadata:
+ * Opens a logical camera reported by the enumerator and validates its metadata
+ * by checking its capability and locating the supporting physical camera
+ * device identifiers.
+ */
+TEST_P(EvsAidlTest, LogicalCameraMetadata) {
+ LOG(INFO) << "Starting LogicalCameraMetadata test";
+
+ // Get the camera list
+ loadCameraList();
+
+ // Open and close each camera twice
+ for (auto&& cam : mCameraInfo) {
+ bool isLogicalCam = false;
+ auto devices = getPhysicalCameraIds(cam.id, isLogicalCam);
+ if (isLogicalCam) {
+ ASSERT_GE(devices.size(), 1) << "Logical camera device must have at least one physical "
+ "camera device ID in its metadata.";
+ }
+ }
+}
+
+/*
+ * CameraStreamExternalBuffering:
+ * This is the same as CameraStreamBuffering except that frame buffers are
+ * allocated by the test client and then imported by the EVS framework.
+ */
+TEST_P(EvsAidlTest, CameraStreamExternalBuffering) {
+ LOG(INFO) << "Starting CameraStreamExternalBuffering test";
+
+ // Arbitrary constant (should be > 1 and not too big)
+ static const unsigned int kBuffersToHold = 3;
+
+ // Get the camera list
+ loadCameraList();
+
+ // Acquire the graphics buffer allocator
+ android::GraphicBufferAllocator& alloc(android::GraphicBufferAllocator::get());
+ const auto usage =
+ GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
+
+ // Test each reported camera
+ for (auto&& cam : mCameraInfo) {
+ // Read a target resolution from the metadata
+ Stream targetCfg = getFirstStreamConfiguration(
+ reinterpret_cast<camera_metadata_t*>(cam.metadata.data()));
+ ASSERT_GT(targetCfg.width, 0);
+ ASSERT_GT(targetCfg.height, 0);
+
+ // Allocate buffers to use
+ std::vector<BufferDesc> buffers;
+ buffers.resize(kBuffersToHold);
+ for (auto i = 0; i < kBuffersToHold; ++i) {
+ unsigned pixelsPerLine;
+ buffer_handle_t memHandle = nullptr;
+ android::status_t result =
+ alloc.allocate(targetCfg.width, targetCfg.height,
+ static_cast<android::PixelFormat>(targetCfg.format),
+ /* layerCount = */ 1, usage, &memHandle, &pixelsPerLine,
+ /* graphicBufferId = */ 0,
+ /* requestorName = */ "CameraStreamExternalBufferingTest");
+ if (result != android::NO_ERROR) {
+ LOG(ERROR) << __FUNCTION__ << " failed to allocate memory.";
+ // Release previously allocated buffers
+ for (auto j = 0; j < i; j++) {
+ alloc.free(::android::dupFromAidl(buffers[j].buffer.handle));
+ }
+ return;
+ } else {
+ BufferDesc buf;
+ HardwareBufferDescription* pDesc =
+ reinterpret_cast<HardwareBufferDescription*>(&buf.buffer.description);
+ pDesc->width = targetCfg.width;
+ pDesc->height = targetCfg.height;
+ pDesc->layers = 1;
+ pDesc->format = targetCfg.format;
+ pDesc->usage = static_cast<BufferUsage>(usage);
+ pDesc->stride = pixelsPerLine;
+ buf.buffer.handle = ::android::dupToAidl(memHandle);
+ buf.bufferId = i; // Unique number to identify this buffer
+ buffers[i] = std::move(buf);
+ }
+ }
+
+ bool isLogicalCam = false;
+ getPhysicalCameraIds(cam.id, isLogicalCam);
+
+ std::shared_ptr<IEvsCamera> pCam;
+ ASSERT_TRUE(mEnumerator->openCamera(cam.id, targetCfg, &pCam).isOk());
+ EXPECT_NE(pCam, nullptr);
+
+ // Store a camera handle for a clean-up
+ mActiveCameras.push_back(pCam);
+
+ // Request to import buffers
+ int delta = 0;
+ auto status = pCam->importExternalBuffers(buffers, &delta);
+ if (isLogicalCam) {
+ ASSERT_FALSE(status.isOk());
+ continue;
+ }
+
+ ASSERT_TRUE(status.isOk());
+ EXPECT_GE(delta, kBuffersToHold);
+
+ // Set up a frame receiver object which will fire up its own thread.
+ std::shared_ptr<FrameHandler> frameHandler =
+ std::make_shared<FrameHandler>(pCam, cam, nullptr, FrameHandler::eNoAutoReturn);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Start the camera's video stream
+ ASSERT_TRUE(frameHandler->startStream());
+
+ // Check that the video stream stalls once we've gotten exactly the number of buffers
+ // we requested since we told the frameHandler not to return them.
+ sleep(1); // 1 second should be enough for at least 5 frames to be delivered worst case
+ unsigned framesReceived = 0;
+ frameHandler->getFramesCounters(&framesReceived, nullptr);
+ ASSERT_LE(kBuffersToHold, framesReceived) << "Stream didn't stall at expected buffer limit";
+
+ // Give back one buffer
+ EXPECT_TRUE(frameHandler->returnHeldBuffer());
+
+ // Once we return a buffer, it shouldn't take more than 1/10 second to get a new one
+ // filled since we require 10fps minimum -- but give a 10% allowance just in case.
+ unsigned framesReceivedAfter = 0;
+ usleep(110 * kMillisecondsToMicroseconds);
+ frameHandler->getFramesCounters(&framesReceivedAfter, nullptr);
+ EXPECT_EQ(framesReceived + 1, framesReceivedAfter) << "Stream should've resumed";
+
+ // Even when the camera pointer goes out of scope, the FrameHandler object will
+ // keep the stream alive unless we tell it to shut down.
+ // Also note that the FrameHandler and the Camera have a mutual circular reference, so
+ // we have to break that cycle in order for either of them to get cleaned up.
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ ASSERT_TRUE(mEnumerator->closeCamera(pCam).isOk());
+ mActiveCameras.clear();
+ // Release buffers
+ for (auto& b : buffers) {
+ alloc.free(::android::dupFromAidl(b.buffer.handle));
+ }
+ buffers.resize(0);
+ }
+}
+
+/*
+ * UltrasonicsArrayOpenClean:
+ * Opens each ultrasonics array reported by the enumerator and then explicitly closes it via a
+ * call to closeUltrasonicsArray. Then repeats the test to ensure all ultrasonics arrays
+ * can be reopened.
+ */
+TEST_P(EvsAidlTest, UltrasonicsArrayOpenClean) {
+ LOG(INFO) << "Starting UltrasonicsArrayOpenClean test";
+
+ // Get the ultrasonics array list
+ loadUltrasonicsArrayList();
+
+ // Open and close each ultrasonics array twice
+ for (auto&& ultraInfo : mUltrasonicsArraysInfo) {
+ for (int pass = 0; pass < 2; pass++) {
+ std::shared_ptr<IEvsUltrasonicsArray> pUltrasonicsArray;
+ ASSERT_TRUE(
+ mEnumerator
+ ->openUltrasonicsArray(ultraInfo.ultrasonicsArrayId, &pUltrasonicsArray)
+ .isOk());
+ EXPECT_NE(pUltrasonicsArray, nullptr);
+
+ // Verify that this ultrasonics array self-identifies correctly
+ UltrasonicsArrayDesc desc;
+ ASSERT_TRUE(pUltrasonicsArray->getUltrasonicArrayInfo(&desc).isOk());
+ EXPECT_EQ(ultraInfo.ultrasonicsArrayId, desc.ultrasonicsArrayId);
+ LOG(DEBUG) << "Found ultrasonics array " << ultraInfo.ultrasonicsArrayId;
+
+ // Explicitly close the ultrasonics array so resources are released right away
+ ASSERT_TRUE(mEnumerator->closeUltrasonicsArray(pUltrasonicsArray).isOk());
+ }
+ }
+}
+
+// Starts a stream and verifies all data received is valid.
+TEST_P(EvsAidlTest, UltrasonicsVerifyStreamData) {
+ LOG(INFO) << "Starting UltrasonicsVerifyStreamData";
+
+ // Get the ultrasonics array list
+ loadUltrasonicsArrayList();
+
+ // For each ultrasonics array.
+ for (auto&& ultraInfo : mUltrasonicsArraysInfo) {
+ LOG(DEBUG) << "Testing ultrasonics array: " << ultraInfo.ultrasonicsArrayId;
+
+ std::shared_ptr<IEvsUltrasonicsArray> pUltrasonicsArray;
+ ASSERT_TRUE(
+ mEnumerator->openUltrasonicsArray(ultraInfo.ultrasonicsArrayId, &pUltrasonicsArray)
+ .isOk());
+ EXPECT_NE(pUltrasonicsArray, nullptr);
+
+ std::shared_ptr<FrameHandlerUltrasonics> frameHandler =
+ std::make_shared<FrameHandlerUltrasonics>(pUltrasonicsArray);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Start stream.
+ ASSERT_TRUE(pUltrasonicsArray->startStream(frameHandler).isOk());
+
+ // Wait 5 seconds to receive frames.
+ sleep(5);
+
+ // Stop stream.
+ ASSERT_TRUE(pUltrasonicsArray->stopStream().isOk());
+
+ EXPECT_GT(frameHandler->getReceiveFramesCount(), 0);
+ EXPECT_TRUE(frameHandler->areAllFramesValid());
+
+ // Explicitly close the ultrasonics array so resources are released right away
+ ASSERT_TRUE(mEnumerator->closeUltrasonicsArray(pUltrasonicsArray).isOk());
+ }
+}
+
+// Sets frames in flight before and after start of stream and verifies success.
+TEST_P(EvsAidlTest, UltrasonicsSetFramesInFlight) {
+ LOG(INFO) << "Starting UltrasonicsSetFramesInFlight";
+
+ // Get the ultrasonics array list
+ loadUltrasonicsArrayList();
+
+ // For each ultrasonics array.
+ for (auto&& ultraInfo : mUltrasonicsArraysInfo) {
+ LOG(DEBUG) << "Testing ultrasonics array: " << ultraInfo.ultrasonicsArrayId;
+
+ std::shared_ptr<IEvsUltrasonicsArray> pUltrasonicsArray;
+ ASSERT_TRUE(
+ mEnumerator->openUltrasonicsArray(ultraInfo.ultrasonicsArrayId, &pUltrasonicsArray)
+ .isOk());
+ EXPECT_NE(pUltrasonicsArray, nullptr);
+
+ ASSERT_TRUE(pUltrasonicsArray->setMaxFramesInFlight(10).isOk());
+
+ std::shared_ptr<FrameHandlerUltrasonics> frameHandler =
+ std::make_shared<FrameHandlerUltrasonics>(pUltrasonicsArray);
+ EXPECT_NE(frameHandler, nullptr);
+
+ // Start stream.
+ ASSERT_TRUE(pUltrasonicsArray->startStream(frameHandler).isOk());
+ ASSERT_TRUE(pUltrasonicsArray->setMaxFramesInFlight(5).isOk());
+
+ // Stop stream.
+ ASSERT_TRUE(pUltrasonicsArray->stopStream().isOk());
+
+ // Explicitly close the ultrasonics array so resources are released right away
+ ASSERT_TRUE(mEnumerator->closeUltrasonicsArray(pUltrasonicsArray).isOk());
+ }
+}
+
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(EvsAidlTest);
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, EvsAidlTest,
+ testing::ValuesIn(android::getAidlHalInstanceNames(IEvsEnumerator::descriptor)),
+ android::PrintInstanceNameToString);
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ ABinderProcess_setThreadPoolMaxThreadCount(1);
+ ABinderProcess_startThreadPool();
+ return RUN_ALL_TESTS();
+}
diff --git a/automotive/vehicle/aidl/impl/utils/common/include/VehicleHalTypes.h b/automotive/vehicle/aidl/impl/utils/common/include/VehicleHalTypes.h
index 013d177..a7fcdcf 100644
--- a/automotive/vehicle/aidl/impl/utils/common/include/VehicleHalTypes.h
+++ b/automotive/vehicle/aidl/impl/utils/common/include/VehicleHalTypes.h
@@ -37,6 +37,7 @@
#include <aidl/android/hardware/automotive/vehicle/SetValueResult.h>
#include <aidl/android/hardware/automotive/vehicle/SetValueResults.h>
#include <aidl/android/hardware/automotive/vehicle/StatusCode.h>
+#include <aidl/android/hardware/automotive/vehicle/SubscribeOptions.h>
#include <aidl/android/hardware/automotive/vehicle/VehicleApPowerStateReport.h>
#include <aidl/android/hardware/automotive/vehicle/VehicleApPowerStateReq.h>
#include <aidl/android/hardware/automotive/vehicle/VehicleArea.h>
diff --git a/automotive/vehicle/aidl/impl/utils/common/include/VehicleUtils.h b/automotive/vehicle/aidl/impl/utils/common/include/VehicleUtils.h
index 49b33d5..0f0ccf1 100644
--- a/automotive/vehicle/aidl/impl/utils/common/include/VehicleUtils.h
+++ b/automotive/vehicle/aidl/impl/utils/common/include/VehicleUtils.h
@@ -67,24 +67,30 @@
}
inline const ::aidl::android::hardware::automotive::vehicle::VehicleAreaConfig* getAreaConfig(
- const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& propValue,
+ int32_t propId, int32_t areaId,
const ::aidl::android::hardware::automotive::vehicle::VehiclePropConfig& config) {
if (config.areaConfigs.size() == 0) {
return nullptr;
}
- if (isGlobalProp(propValue.prop)) {
+ if (isGlobalProp(propId)) {
return &(config.areaConfigs[0]);
}
for (const auto& c : config.areaConfigs) {
- if (c.areaId == propValue.areaId) {
+ if (c.areaId == areaId) {
return &c;
}
}
return nullptr;
}
+inline const ::aidl::android::hardware::automotive::vehicle::VehicleAreaConfig* getAreaConfig(
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& propValue,
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropConfig& config) {
+ return getAreaConfig(propValue.prop, propValue.areaId, config);
+}
+
inline std::unique_ptr<::aidl::android::hardware::automotive::vehicle::VehiclePropValue>
createVehiclePropValueVec(::aidl::android::hardware::automotive::vehicle::VehiclePropertyType type,
size_t vecSize) {
diff --git a/automotive/vehicle/aidl/impl/utils/common/src/VehiclePropertyStore.cpp b/automotive/vehicle/aidl/impl/utils/common/src/VehiclePropertyStore.cpp
index 1a79230..c1fa896 100644
--- a/automotive/vehicle/aidl/impl/utils/common/src/VehiclePropertyStore.cpp
+++ b/automotive/vehicle/aidl/impl/utils/common/src/VehiclePropertyStore.cpp
@@ -21,9 +21,11 @@
#include <VehicleHalTypes.h>
#include <VehicleUtils.h>
-#include <android-base/format.h>
+#include <android-base/stringprintf.h>
#include <math/HashCombine.h>
+#include <inttypes.h>
+
namespace android {
namespace hardware {
namespace automotive {
@@ -36,13 +38,14 @@
using ::aidl::android::hardware::automotive::vehicle::VehiclePropValue;
using ::android::base::Error;
using ::android::base::Result;
+using ::android::base::StringPrintf;
bool VehiclePropertyStore::RecordId::operator==(const VehiclePropertyStore::RecordId& other) const {
return area == other.area && token == other.token;
}
std::string VehiclePropertyStore::RecordId::toString() const {
- return ::fmt::format("RecordID{{.areaId={:d}, .token={:d}}}", area, token);
+ return StringPrintf("RecordID{.areaId=%" PRId32 ", .token=%" PRId64 "}", area, token);
}
size_t VehiclePropertyStore::RecordIdHash::operator()(RecordId const& recordId) const {
diff --git a/automotive/vehicle/aidl/impl/vhal/Android.bp b/automotive/vehicle/aidl/impl/vhal/Android.bp
index a54ab4b..0132e6f 100644
--- a/automotive/vehicle/aidl/impl/vhal/Android.bp
+++ b/automotive/vehicle/aidl/impl/vhal/Android.bp
@@ -57,6 +57,8 @@
"src/ConnectedClient.cpp",
"src/DefaultVehicleHal.cpp",
"src/PendingRequestPool.cpp",
+ "src/RecurrentTimer.cpp",
+ "src/SubscriptionManager.cpp",
],
static_libs: [
"VehicleHalUtils",
diff --git a/automotive/vehicle/aidl/impl/vhal/include/ConnectedClient.h b/automotive/vehicle/aidl/impl/vhal/include/ConnectedClient.h
index 97c25e3..833707a 100644
--- a/automotive/vehicle/aidl/impl/vhal/include/ConnectedClient.h
+++ b/automotive/vehicle/aidl/impl/vhal/include/ConnectedClient.h
@@ -19,6 +19,7 @@
#include "PendingRequestPool.h"
+#include <IVehicleHardware.h>
#include <VehicleHalTypes.h>
#include <aidl/android/hardware/automotive/vehicle/IVehicleCallback.h>
@@ -41,24 +42,24 @@
// This class is thread-safe.
class ConnectedClient {
public:
- ConnectedClient(
- std::shared_ptr<PendingRequestPool> requestPool,
- std::shared_ptr<::aidl::android::hardware::automotive::vehicle::IVehicleCallback>
- callback);
+ using CallbackType =
+ std::shared_ptr<::aidl::android::hardware::automotive::vehicle::IVehicleCallback>;
+
+ ConnectedClient(std::shared_ptr<PendingRequestPool> requestPool, CallbackType callback);
virtual ~ConnectedClient() = default;
// Gets the unique ID for this client.
const void* id();
- // Add client requests. The requests would be registered as pending requests until
+ // Adds client requests. The requests would be registered as pending requests until
// {@code tryFinishRequests} is called for them.
// Returns {@code INVALID_ARG} error if any of the requestIds are duplicate with one of the
// pending request IDs or {@code TRY_AGAIN} error if the pending request pool is full and could
// no longer add requests.
::android::base::Result<void> addRequests(const std::unordered_set<int64_t>& requestIds);
- // Mark the requests as finished. Returns a list of request IDs that was pending and has been
+ // Marks the requests as finished. Returns a list of request IDs that were pending and have been
// finished. It must be a set of the requested request IDs.
std::unordered_set<int64_t> tryFinishRequests(const std::unordered_set<int64_t>& requestIds);
@@ -67,8 +68,7 @@
virtual std::shared_ptr<const PendingRequestPool::TimeoutCallbackFunc> getTimeoutCallback() = 0;
const std::shared_ptr<PendingRequestPool> mRequestPool;
- const std::shared_ptr<::aidl::android::hardware::automotive::vehicle::IVehicleCallback>
- mCallback;
+ const CallbackType mCallback;
};
// A class to represent a client that calls {@code IVehicle.setValues} or {@code
@@ -76,10 +76,7 @@
template <class ResultType, class ResultsType>
class GetSetValuesClient final : public ConnectedClient {
public:
- GetSetValuesClient(
- std::shared_ptr<PendingRequestPool> requestPool,
- std::shared_ptr<::aidl::android::hardware::automotive::vehicle::IVehicleCallback>
- callback);
+ GetSetValuesClient(std::shared_ptr<PendingRequestPool> requestPool, CallbackType callback);
// Sends the results to this client.
void sendResults(const std::vector<ResultType>& results);
@@ -101,6 +98,37 @@
std::shared_ptr<const std::function<void(std::vector<ResultType>)>> mResultCallback;
};
+// A class to represent a client that calls {@code IVehicle.subscribe}.
+class SubscriptionClient final : public ConnectedClient {
+ public:
+ SubscriptionClient(std::shared_ptr<PendingRequestPool> requestPool, CallbackType callback);
+
+ // Gets the callback to be called when the request for this client has finished.
+ std::shared_ptr<const IVehicleHardware::GetValuesCallback> getResultCallback();
+
+ // Marshals the updated values into largeParcelable and sends it through {@code onPropertyEvent}
+ // callback.
+ static void sendUpdatedValues(
+ CallbackType callback,
+ std::vector<::aidl::android::hardware::automotive::vehicle::VehiclePropValue>&&
+ updatedValues);
+
+ protected:
+ // Gets the callback to be called when the request for this client has timed out.
+ std::shared_ptr<const PendingRequestPool::TimeoutCallbackFunc> getTimeoutCallback() override;
+
+ private:
+ // The following members are only initialized during construction.
+ std::shared_ptr<const PendingRequestPool::TimeoutCallbackFunc> mTimeoutCallback;
+ std::shared_ptr<const IVehicleHardware::GetValuesCallback> mResultCallback;
+ std::shared_ptr<const IVehicleHardware::PropertyChangeCallback> mPropertyChangeCallback;
+
+ static void onGetValueResults(
+ const void* clientId, CallbackType callback,
+ std::shared_ptr<PendingRequestPool> requestPool,
+ std::vector<::aidl::android::hardware::automotive::vehicle::GetValueResult> results);
+};
+
} // namespace vehicle
} // namespace automotive
} // namespace hardware
diff --git a/automotive/vehicle/aidl/impl/vhal/include/DefaultVehicleHal.h b/automotive/vehicle/aidl/impl/vhal/include/DefaultVehicleHal.h
index e3e77a3..62b2627 100644
--- a/automotive/vehicle/aidl/impl/vhal/include/DefaultVehicleHal.h
+++ b/automotive/vehicle/aidl/impl/vhal/include/DefaultVehicleHal.h
@@ -20,6 +20,7 @@
#include "ConnectedClient.h"
#include "ParcelableUtils.h"
#include "PendingRequestPool.h"
+#include "SubscriptionManager.h"
#include <IVehicleHardware.h>
#include <VehicleUtils.h>
@@ -52,6 +53,8 @@
explicit DefaultVehicleHal(std::unique_ptr<IVehicleHardware> hardware);
+ ~DefaultVehicleHal();
+
::ndk::ScopedAStatus getAllPropConfigs(
::aidl::android::hardware::automotive::vehicle::VehiclePropConfigs* returnConfigs)
override;
@@ -90,11 +93,68 @@
GetSetValuesClient<::aidl::android::hardware::automotive::vehicle::SetValueResult,
::aidl::android::hardware::automotive::vehicle::SetValueResults>;
+ // A thread-safe class to maintain an increasing request ID for each subscribe client. This
+ // class is safe to pass to async callbacks.
+ class SubscribeIdByClient {
+ public:
+ int64_t getId(const CallbackType& callback);
+
+ private:
+ std::mutex mLock;
+ std::unordered_map<const AIBinder*, int64_t> mIds GUARDED_BY(mLock);
+ };
+
+ // A thread-safe class to store all subscribe clients. This class is safe to pass to async
+ // callbacks.
+ class SubscriptionClients {
+ public:
+ SubscriptionClients(std::shared_ptr<PendingRequestPool> pool) : mPendingRequestPool(pool) {}
+
+ std::shared_ptr<SubscriptionClient> getClient(const CallbackType& callback);
+
+ void removeClient(const AIBinder* clientId);
+
+ size_t countClients();
+
+ private:
+ std::mutex mLock;
+ std::unordered_map<const AIBinder*, std::shared_ptr<SubscriptionClient>> mClients
+ GUARDED_BY(mLock);
+ // PendingRequestPool is thread-safe.
+ std::shared_ptr<PendingRequestPool> mPendingRequestPool;
+ };
+
+ // A wrapper for linkToDeath to enable stubbing for test.
+ class ILinkToDeath {
+ public:
+ virtual ~ILinkToDeath() = default;
+
+ virtual binder_status_t linkToDeath(AIBinder* binder, AIBinder_DeathRecipient* recipient,
+ void* cookie) = 0;
+ };
+
+ // A real implementation for ILinkToDeath.
+ class AIBinderLinkToDeathImpl final : public ILinkToDeath {
+ public:
+ binder_status_t linkToDeath(AIBinder* binder, AIBinder_DeathRecipient* recipient,
+ void* cookie) override;
+ };
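+
+ // A hypothetical test-only stub (an illustrative sketch, not part of this change) that could
+ // be installed through the test-only setLinkToDeathImpl() declared below so that linkToDeath
+ // always succeeds:
+ //
+ //   class AlwaysOkLinkToDeath final : public ILinkToDeath {
+ //     public:
+ //       binder_status_t linkToDeath(AIBinder*, AIBinder_DeathRecipient*, void*) override {
+ //           return STATUS_OK;
+ //       }
+ //   };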
+
+ // OnBinderDiedContext is a type used as a cookie passed to the deathRecipient. The deathRecipient's
+ // onBinderDied function takes only a cookie as input and we have to store all the contexts
+ // as the cookie.
+ struct OnBinderDiedContext {
+ DefaultVehicleHal* vhal;
+ const AIBinder* clientId;
+ };
+
// The default timeout of get or set value requests is 30s.
// TODO(b/214605968): define TIMEOUT_IN_NANO in IVehicle and allow getValues/setValues/subscribe
// to specify custom timeouts.
static constexpr int64_t TIMEOUT_IN_NANO = 30'000'000'000;
- const std::unique_ptr<IVehicleHardware> mVehicleHardware;
+ // Heartbeat event interval: 3s.
+ static constexpr int64_t HEART_BEAT_INTERVAL_IN_NANO = 3'000'000'000;
+ const std::shared_ptr<IVehicleHardware> mVehicleHardware;
// mConfigsByPropId and mConfigFile are only modified during initialization, so no need to
// lock guard them.
@@ -104,17 +164,25 @@
std::unique_ptr<::ndk::ScopedFileDescriptor> mConfigFile;
// PendingRequestPool is thread-safe.
std::shared_ptr<PendingRequestPool> mPendingRequestPool;
+ // SubscriptionManager is thread-safe.
+ std::shared_ptr<SubscriptionManager> mSubscriptionManager;
std::mutex mLock;
- std::unordered_map<CallbackType, std::shared_ptr<GetValuesClient>> mGetValuesClients
+ std::unordered_map<const AIBinder*, std::unique_ptr<OnBinderDiedContext>> mOnBinderDiedContexts
GUARDED_BY(mLock);
- std::unordered_map<CallbackType, std::shared_ptr<SetValuesClient>> mSetValuesClients
+ std::unordered_map<const AIBinder*, std::shared_ptr<GetValuesClient>> mGetValuesClients
GUARDED_BY(mLock);
+ std::unordered_map<const AIBinder*, std::shared_ptr<SetValuesClient>> mSetValuesClients
+ GUARDED_BY(mLock);
+ // SubscriptionClients is thread-safe.
+ std::shared_ptr<SubscriptionClients> mSubscriptionClients;
+ // mLinkToDeathImpl is only going to be changed in test.
+ std::unique_ptr<ILinkToDeath> mLinkToDeathImpl;
- template <class T>
- std::shared_ptr<T> getOrCreateClient(
- std::unordered_map<CallbackType, std::shared_ptr<T>>* clients,
- const CallbackType& callback) REQUIRES(mLock);
+ // RecurrentTimer is thread-safe.
+ RecurrentTimer mRecurrentTimer;
+
+ ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
::android::base::Result<void> checkProperty(
const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& propValue);
@@ -127,9 +195,55 @@
const std::vector<::aidl::android::hardware::automotive::vehicle::SetValueRequest>&
requests);
+ ::android::base::Result<void> checkSubscribeOptions(
+ const std::vector<::aidl::android::hardware::automotive::vehicle::SubscribeOptions>&
+ options);
+
+ ::android::base::Result<void> checkReadPermission(
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& value) const;
+
+ ::android::base::Result<void> checkWritePermission(
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& value) const;
+
+ ::android::base::Result<
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropConfig*>
+ getConfig(int32_t propId) const;
+
+ void onBinderDiedWithContext(const AIBinder* clientId);
+
+ void onBinderUnlinkedWithContext(const AIBinder* clientId);
+
+ void monitorBinderLifeCycle(const CallbackType& callback);
+
+ template <class T>
+ static std::shared_ptr<T> getOrCreateClient(
+ std::unordered_map<const AIBinder*, std::shared_ptr<T>>* clients,
+ const CallbackType& callback, std::shared_ptr<PendingRequestPool> pendingRequestPool);
+
+ static void getValueFromHardwareCallCallback(
+ std::weak_ptr<IVehicleHardware> vehicleHardware,
+ std::shared_ptr<SubscribeIdByClient> subscribeIdByClient,
+ std::shared_ptr<SubscriptionClients> subscriptionClients, const CallbackType& callback,
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& value);
+
+ static void onPropertyChangeEvent(
+ std::weak_ptr<SubscriptionManager> subscriptionManager,
+ const std::vector<::aidl::android::hardware::automotive::vehicle::VehiclePropValue>&
+ updatedValues);
+
+ static void checkHealth(std::weak_ptr<IVehicleHardware> hardware,
+ std::weak_ptr<SubscriptionManager> subscriptionManager);
+
+ static void onBinderDied(void* cookie);
+
+ static void onBinderUnlinked(void* cookie);
+
// Test-only
// Set the default timeout for pending requests.
void setTimeout(int64_t timeoutInNano);
+
+ // Test-only
+ void setLinkToDeathImpl(std::unique_ptr<ILinkToDeath> impl);
};
} // namespace vehicle
diff --git a/automotive/vehicle/aidl/impl/vhal/include/RecurrentTimer.h b/automotive/vehicle/aidl/impl/vhal/include/RecurrentTimer.h
new file mode 100644
index 0000000..5f0f716
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/include/RecurrentTimer.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef android_hardware_automotive_vehicle_aidl_impl_vhal_include_RecurrentTimer_H_
+#define android_hardware_automotive_vehicle_aidl_impl_vhal_include_RecurrentTimer_H_
+
+#include <android-base/thread_annotations.h>
+
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+// A thread-safe recurrent timer.
+class RecurrentTimer final {
+ public:
+ // The class for the function that would be called recurrently.
+ using Callback = std::function<void()>;
+
+ RecurrentTimer();
+
+ ~RecurrentTimer();
+
+ // Registers a recurrent callback for a given interval.
+ // Registering the same callback twice will override the interval provided before.
+ void registerTimerCallback(int64_t intervalInNano, std::shared_ptr<Callback> callback);
+
+ // Unregisters a previously registered recurrent callback.
+ void unregisterTimerCallback(std::shared_ptr<Callback> callback);
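+
+ // An illustrative usage sketch (not part of this header; the 1-second interval is an
+ // arbitrary example value):
+ //
+ //   RecurrentTimer timer;
+ //   auto action = std::make_shared<RecurrentTimer::Callback>([] {
+ //       // Periodic work goes here.
+ //   });
+ //   timer.registerTimerCallback(/*intervalInNano=*/1'000'000'000, action);
+ //   ...
+ //   timer.unregisterTimerCallback(action);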
+
+ private:
+ // friend class for unit testing.
+ friend class RecurrentTimerTest;
+
+ struct CallbackInfo {
+ std::shared_ptr<Callback> callback;
+ int64_t interval;
+ int64_t nextTime;
+ // A flag to indicate whether this CallbackInfo is already outdated and should be ignored.
+ // We need this flag because we cannot easily remove an arbitrary element from a heap.
+ bool outdated = false;
+
+ static bool cmp(const std::unique_ptr<CallbackInfo>& lhs,
+ const std::unique_ptr<CallbackInfo>& rhs);
+ };
+
+ std::mutex mLock;
+ std::thread mThread;
+ std::condition_variable mCond;
+ bool mStopRequested GUARDED_BY(mLock) = false;
+ // Maps each callback to its currently active CallbackInfo in mCallbackQueue.
+ std::unordered_map<std::shared_ptr<Callback>, CallbackInfo*> mCallbacks GUARDED_BY(mLock);
+ // A min-heap sorted by nextTime. Because we cannot remove an arbitrary element from the heap,
+ // a single Callback can have multiple entries in this queue, but only one of them is valid;
+ // the rest are marked as outdated. The valid one is the one stored in mCallbacks.
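+ // Conceptually (a sketch only, not necessarily how the implementation is written), dropping
+ // outdated entries from the top of the heap looks like:
+ //
+ //   while (!mCallbackQueue.empty() && mCallbackQueue.front()->outdated) {
+ //       std::pop_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);
+ //       mCallbackQueue.pop_back();
+ //   }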
+ std::vector<std::unique_ptr<CallbackInfo>> mCallbackQueue GUARDED_BY(mLock);
+
+ void loop();
+
+ // Marks the callbackInfo as outdated so it is ignored when popped from the heap.
+ void markOutdatedLocked(CallbackInfo* callback) REQUIRES(mLock);
+ // Removes all outdated callbackInfos from the top of the heap. This function must be called
+ // each time we might introduce outdated elements to the top, so that the heap always stays
+ // valid from the top.
+ void removeInvalidCallbackLocked() REQUIRES(mLock);
+ // Pops the next closest callback (must be valid) from the heap.
+ std::unique_ptr<CallbackInfo> popNextCallbackLocked() REQUIRES(mLock);
+};
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // android_hardware_automotive_vehicle_aidl_impl_vhal_include_RecurrentTimer_H_
diff --git a/automotive/vehicle/aidl/impl/vhal/include/SubscriptionManager.h b/automotive/vehicle/aidl/impl/vhal/include/SubscriptionManager.h
new file mode 100644
index 0000000..e739c8c
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/include/SubscriptionManager.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef android_hardware_automotive_vehicle_aidl_impl_vhal_include_SubscriptionManager_H_
+#define android_hardware_automotive_vehicle_aidl_impl_vhal_include_SubscriptionManager_H_
+
+#include "RecurrentTimer.h"
+
+#include <VehicleHalTypes.h>
+
+#include <aidl/android/hardware/automotive/vehicle/IVehicleCallback.h>
+#include <android-base/result.h>
+#include <android-base/thread_annotations.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+// A thread-safe subscription manager that manages all VHAL subscriptions.
+class SubscriptionManager final {
+ public:
+ using ClientIdType = const AIBinder*;
+ using CallbackType =
+ std::shared_ptr<::aidl::android::hardware::automotive::vehicle::IVehicleCallback>;
+ using GetValueFunc = std::function<void(
+ const CallbackType& callback,
+ const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue& value)>;
+
+ explicit SubscriptionManager(GetValueFunc&& action);
+ ~SubscriptionManager();
+
+ // Subscribes to properties according to {@code SubscribeOptions}. Note that every option must
+ // contain a non-empty areaIds field listing all the area IDs to subscribe to. As a result,
+ // the options here differ from the options passed by the VHAL client.
+ // Returns an error if any of the subscribe options is not valid; in that case, no
+ // properties are subscribed.
+ // Returns ok if all the options are parsed correctly and all the properties are subscribed.
+ ::android::base::Result<void> subscribe(
+ const CallbackType& callback,
+ const std::vector<::aidl::android::hardware::automotive::vehicle::SubscribeOptions>&
+ options,
+ bool isContinuousProperty);
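+
+ // An illustrative call sketch (the SubscribeOptions field names and the toInt() helper used
+ // below are assumptions, not defined in this header):
+ //
+ //   SubscribeOptions option;
+ //   option.propId = toInt(VehicleProperty::PERF_VEHICLE_SPEED);
+ //   option.areaIds = {0};
+ //   option.sampleRate = 10.0f;  // In Hz; only meaningful for continuous properties.
+ //   auto result = manager->subscribe(callback, {option}, /*isContinuousProperty=*/true);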
+
+ // Unsubscribes from the properties for the client.
+ // Returns an error if the client was not subscribed before or one of the given properties was
+ // not subscribed. If an error is returned, no property is unsubscribed.
+ // Returns ok if all the requested properties for the client are unsubscribed.
+ ::android::base::Result<void> unsubscribe(ClientIdType client,
+ const std::vector<int32_t>& propIds);
+
+ // Unsubscribes from all the properties for the client.
+ // Returns an error if the client was not subscribed before. If an error is returned, no
+ // property is unsubscribed.
+ // Returns ok if all the properties for the client are unsubscribed.
+ ::android::base::Result<void> unsubscribe(ClientIdType client);
+
+ // For a list of updated properties, returns a map that maps clients subscribing to
+ // the updated properties to a list of updated values. This would only return on-change property
+ // clients that should be informed of the given updated values.
+ std::unordered_map<
+ CallbackType,
+ std::vector<const ::aidl::android::hardware::automotive::vehicle::VehiclePropValue*>>
+ getSubscribedClients(
+ const std::vector<::aidl::android::hardware::automotive::vehicle::VehiclePropValue>&
+ updatedValues);
+
+ // Checks whether the sample rate is valid.
+ static bool checkSampleRate(float sampleRate);
+
+ private:
+ // Friend class for testing.
+ friend class DefaultVehicleHalTest;
+
+ struct PropIdAreaId {
+ int32_t propId;
+ int32_t areaId;
+
+ bool operator==(const PropIdAreaId& other) const;
+ };
+
+ struct PropIdAreaIdHash {
+ size_t operator()(const PropIdAreaId& propIdAreaId) const;
+ };
+
+ // A class to represent a registered subscription.
+ class Subscription {
+ public:
+ Subscription() = default;
+
+ Subscription(const Subscription&) = delete;
+
+ virtual ~Subscription() = default;
+
+ virtual bool isOnChange();
+ };
+
+ // A subscription for OnContinuous property. The registered action would be called recurrently
+ // until this class is destructed.
+ class RecurrentSubscription final : public Subscription {
+ public:
+ explicit RecurrentSubscription(std::shared_ptr<RecurrentTimer> timer,
+ std::function<void()>&& action, int64_t interval);
+ ~RecurrentSubscription();
+
+ bool isOnChange() override;
+
+ private:
+ std::shared_ptr<std::function<void()>> mAction;
+ std::shared_ptr<RecurrentTimer> mTimer;
+ };
+
+ // A subscription for OnChange property.
+ class OnChangeSubscription final : public Subscription {
+ public:
+ bool isOnChange() override;
+ };
+
+ mutable std::mutex mLock;
+ std::unordered_map<PropIdAreaId, std::unordered_map<ClientIdType, CallbackType>,
+ PropIdAreaIdHash>
+ mClientsByPropIdArea GUARDED_BY(mLock);
+ std::unordered_map<ClientIdType, std::unordered_map<PropIdAreaId, std::unique_ptr<Subscription>,
+ PropIdAreaIdHash>>
+ mSubscriptionsByClient GUARDED_BY(mLock);
+ // RecurrentTimer is thread-safe.
+ std::shared_ptr<RecurrentTimer> mTimer;
+ const GetValueFunc mGetValue;
+
+ static ::android::base::Result<int64_t> getInterval(float sampleRate);
+
+ // Checks whether the manager is empty. For testing purposes.
+ bool isEmpty();
+};
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // android_hardware_automotive_vehicle_aidl_impl_vhal_include_SubscriptionManager_H_
diff --git a/automotive/vehicle/aidl/impl/vhal/src/ConnectedClient.cpp b/automotive/vehicle/aidl/impl/vhal/src/ConnectedClient.cpp
index abc3eb0..5ccef55 100644
--- a/automotive/vehicle/aidl/impl/vhal/src/ConnectedClient.cpp
+++ b/automotive/vehicle/aidl/impl/vhal/src/ConnectedClient.cpp
@@ -244,6 +244,99 @@
template class GetSetValuesClient<GetValueResult, GetValueResults>;
template class GetSetValuesClient<SetValueResult, SetValueResults>;
+SubscriptionClient::SubscriptionClient(std::shared_ptr<PendingRequestPool> requestPool,
+ std::shared_ptr<IVehicleCallback> callback)
+ : ConnectedClient(requestPool, callback) {
+ mTimeoutCallback = std::make_shared<const PendingRequestPool::TimeoutCallbackFunc>(
+ [](std::unordered_set<int64_t> timeoutIds) {
+ for (int64_t id : timeoutIds) {
+ ALOGW("subscribe: requests with IDs: %" PRId64
+ " has timed-out, not client informed, "
+ "possibly one of recurrent requests for this subscription failed",
+ id);
+ }
+ });
+ auto requestPoolCopy = mRequestPool;
+ const void* clientId = reinterpret_cast<const void*>(this);
+ mResultCallback = std::make_shared<const IVehicleHardware::GetValuesCallback>(
+ [clientId, callback, requestPoolCopy](std::vector<GetValueResult> results) {
+ onGetValueResults(clientId, callback, requestPoolCopy, results);
+ });
+}
+
+std::shared_ptr<const std::function<void(std::vector<GetValueResult>)>>
+SubscriptionClient::getResultCallback() {
+ return mResultCallback;
+}
+
+std::shared_ptr<const PendingRequestPool::TimeoutCallbackFunc>
+SubscriptionClient::getTimeoutCallback() {
+ return mTimeoutCallback;
+}
+
+void SubscriptionClient::sendUpdatedValues(std::shared_ptr<IVehicleCallback> callback,
+ std::vector<VehiclePropValue>&& updatedValues) {
+ if (updatedValues.empty()) {
+ return;
+ }
+
+ // TODO(b/205189110): Use memory pool here and fill in sharedMemoryId.
+ VehiclePropValues vehiclePropValues;
+ int32_t sharedMemoryFileCount = 0;
+ ScopedAStatus status = vectorToStableLargeParcelable(updatedValues, &vehiclePropValues);
+ if (!status.isOk()) {
+ int statusCode = status.getServiceSpecificError();
+ ALOGE("subscribe: failed to marshal result into large parcelable, error: "
+ "%s, code: %d",
+ status.getMessage(), statusCode);
+ return;
+ }
+
+ if (ScopedAStatus callbackStatus =
+ callback->onPropertyEvent(vehiclePropValues, sharedMemoryFileCount);
+ !callbackStatus.isOk()) {
+ ALOGE("subscribe: failed to call callback, error: %s, code: %d", status.getMessage(),
+ status.getServiceSpecificError());
+ }
+}
+
+void SubscriptionClient::onGetValueResults(const void* clientId,
+ std::shared_ptr<IVehicleCallback> callback,
+ std::shared_ptr<PendingRequestPool> requestPool,
+ std::vector<GetValueResult> results) {
+ std::unordered_set<int64_t> requestIds;
+ for (const auto& result : results) {
+ requestIds.insert(result.requestId);
+ }
+
+ auto finishedRequests = requestPool->tryFinishRequests(clientId, requestIds);
+ std::vector<VehiclePropValue> propValues;
+ for (auto& result : results) {
+ int64_t requestId = result.requestId;
+ if (finishedRequests.find(requestId) == finishedRequests.end()) {
+ ALOGE("subscribe[%" PRId64
+ "]: no pending request for the result from hardware, "
+ "possibly already time-out",
+ requestId);
+ continue;
+ }
+ if (result.status != StatusCode::OK) {
+ ALOGE("subscribe[%" PRId64
+ "]: hardware returns non-ok status for getValues, status: "
+ "%d",
+ requestId, toInt(result.status));
+ continue;
+ }
+ if (!result.prop.has_value()) {
+ ALOGE("subscribe[%" PRId64 "]: no prop value in getValues result", requestId);
+ continue;
+ }
+ propValues.push_back(std::move(result.prop.value()));
+ }
+
+ sendUpdatedValues(callback, std::move(propValues));
+}
+
} // namespace vehicle
} // namespace automotive
} // namespace hardware
diff --git a/automotive/vehicle/aidl/impl/vhal/src/DefaultVehicleHal.cpp b/automotive/vehicle/aidl/impl/vhal/src/DefaultVehicleHal.cpp
index 3c454f0..3e088c5 100644
--- a/automotive/vehicle/aidl/impl/vhal/src/DefaultVehicleHal.cpp
+++ b/automotive/vehicle/aidl/impl/vhal/src/DefaultVehicleHal.cpp
@@ -23,8 +23,11 @@
#include <VehicleUtils.h>
#include <android-base/result.h>
+#include <android-base/stringprintf.h>
#include <utils/Log.h>
+#include <utils/SystemClock.h>
+#include <inttypes.h>
#include <set>
#include <unordered_set>
@@ -33,11 +36,12 @@
namespace automotive {
namespace vehicle {
+namespace {
+
using ::aidl::android::hardware::automotive::vehicle::GetValueRequest;
using ::aidl::android::hardware::automotive::vehicle::GetValueRequests;
using ::aidl::android::hardware::automotive::vehicle::GetValueResult;
using ::aidl::android::hardware::automotive::vehicle::GetValueResults;
-using ::aidl::android::hardware::automotive::vehicle::IVehicleCallback;
using ::aidl::android::hardware::automotive::vehicle::SetValueRequest;
using ::aidl::android::hardware::automotive::vehicle::SetValueRequests;
using ::aidl::android::hardware::automotive::vehicle::SetValueResult;
@@ -47,13 +51,56 @@
using ::aidl::android::hardware::automotive::vehicle::VehicleAreaConfig;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropConfig;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropConfigs;
+using ::aidl::android::hardware::automotive::vehicle::VehicleProperty;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropertyAccess;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropertyChangeMode;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropertyStatus;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropValue;
using ::android::automotive::car_binder_lib::LargeParcelableBase;
using ::android::base::Error;
using ::android::base::expected;
using ::android::base::Result;
+using ::android::base::StringPrintf;
+
+using ::ndk::ScopedAIBinder_DeathRecipient;
using ::ndk::ScopedAStatus;
+std::string toString(const std::unordered_set<int64_t>& values) {
+ std::string str = "";
+ for (auto it = values.begin(); it != values.end(); it++) {
+ str += std::to_string(*it);
+ if (std::next(it, 1) != values.end()) {
+ str += ", ";
+ }
+ }
+ return str;
+}
+
+} // namespace
+
+std::shared_ptr<SubscriptionClient> DefaultVehicleHal::SubscriptionClients::getClient(
+ const CallbackType& callback) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return getOrCreateClient(&mClients, callback, mPendingRequestPool);
+}
+
+int64_t DefaultVehicleHal::SubscribeIdByClient::getId(const CallbackType& callback) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ // This is initialized to 0 if the callback does not exist in the map.
+ int64_t subscribeId = (mIds[callback->asBinder().get()])++;
+ return subscribeId;
+}
+
+void DefaultVehicleHal::SubscriptionClients::removeClient(const AIBinder* clientId) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mClients.erase(clientId);
+}
+
+size_t DefaultVehicleHal::SubscriptionClients::countClients() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return mClients.size();
+}
+
DefaultVehicleHal::DefaultVehicleHal(std::unique_ptr<IVehicleHardware> hardware)
: mVehicleHardware(std::move(hardware)),
mPendingRequestPool(std::make_shared<PendingRequestPool>(TIMEOUT_IN_NANO)) {
@@ -73,6 +120,164 @@
if (result.value() != nullptr) {
mConfigFile = std::move(result.value());
}
+
+ mSubscriptionClients = std::make_shared<SubscriptionClients>(mPendingRequestPool);
+
+ auto subscribeIdByClient = std::make_shared<SubscribeIdByClient>();
+ // Make a weak copy of IVehicleHardware because subscriptionManager uses IVehicleHardware and
+ // IVehicleHardware uses subscriptionManager. We want to avoid a cyclic reference.
+ std::weak_ptr<IVehicleHardware> hardwareCopy = mVehicleHardware;
+ SubscriptionManager::GetValueFunc getValueFunc = std::bind(
+ &DefaultVehicleHal::getValueFromHardwareCallCallback, hardwareCopy, subscribeIdByClient,
+ mSubscriptionClients, std::placeholders::_1, std::placeholders::_2);
+ mSubscriptionManager = std::make_shared<SubscriptionManager>(std::move(getValueFunc));
+
+ std::weak_ptr<SubscriptionManager> subscriptionManagerCopy = mSubscriptionManager;
+ mVehicleHardware->registerOnPropertyChangeEvent(
+ std::make_unique<IVehicleHardware::PropertyChangeCallback>(
+ [subscriptionManagerCopy](std::vector<VehiclePropValue> updatedValues) {
+ onPropertyChangeEvent(subscriptionManagerCopy, updatedValues);
+ }));
+
+ // Register heartbeat event.
+ mRecurrentTimer.registerTimerCallback(
+ HEART_BEAT_INTERVAL_IN_NANO,
+ std::make_shared<std::function<void()>>([hardwareCopy, subscriptionManagerCopy]() {
+ checkHealth(hardwareCopy, subscriptionManagerCopy);
+ }));
+
+ mLinkToDeathImpl = std::make_unique<AIBinderLinkToDeathImpl>();
+ mDeathRecipient = ScopedAIBinder_DeathRecipient(
+ AIBinder_DeathRecipient_new(&DefaultVehicleHal::onBinderDied));
+ AIBinder_DeathRecipient_setOnUnlinked(mDeathRecipient.get(),
+ &DefaultVehicleHal::onBinderUnlinked);
+}
+
+DefaultVehicleHal::~DefaultVehicleHal() {
+ // Delete the deathRecipient so that onBinderDied will not be called and reference 'this'.
+ mDeathRecipient = ScopedAIBinder_DeathRecipient();
+}
+
+void DefaultVehicleHal::onPropertyChangeEvent(
+ std::weak_ptr<SubscriptionManager> subscriptionManager,
+ const std::vector<VehiclePropValue>& updatedValues) {
+ auto manager = subscriptionManager.lock();
+ if (manager == nullptr) {
+ ALOGW("the SubscriptionManager is destroyed, DefaultVehicleHal is ending");
+ return;
+ }
+ auto updatedValuesByClients = manager->getSubscribedClients(updatedValues);
+ for (const auto& [callback, valuePtrs] : updatedValuesByClients) {
+ std::vector<VehiclePropValue> values;
+ for (const VehiclePropValue* valuePtr : valuePtrs) {
+ values.push_back(*valuePtr);
+ }
+ SubscriptionClient::sendUpdatedValues(callback, std::move(values));
+ }
+}
+
+template <class T>
+std::shared_ptr<T> DefaultVehicleHal::getOrCreateClient(
+ std::unordered_map<const AIBinder*, std::shared_ptr<T>>* clients,
+ const CallbackType& callback, std::shared_ptr<PendingRequestPool> pendingRequestPool) {
+ const AIBinder* clientId = callback->asBinder().get();
+ if (clients->find(clientId) == clients->end()) {
+ (*clients)[clientId] = std::make_shared<T>(pendingRequestPool, callback);
+ }
+ return (*clients)[clientId];
+}
+
+void DefaultVehicleHal::monitorBinderLifeCycle(const CallbackType& callback) {
+ AIBinder* clientId = callback->asBinder().get();
+ {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ if (mOnBinderDiedContexts.find(clientId) != mOnBinderDiedContexts.end()) {
+ // Already registered.
+ return;
+ }
+ }
+
+ std::unique_ptr<OnBinderDiedContext> context = std::make_unique<OnBinderDiedContext>(
+ OnBinderDiedContext{.vhal = this, .clientId = clientId});
+ binder_status_t status = mLinkToDeathImpl->linkToDeath(clientId, mDeathRecipient.get(),
+ static_cast<void*>(context.get()));
+ if (status == STATUS_OK) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ // Insert into a map to keep the context object alive.
+ mOnBinderDiedContexts[clientId] = std::move(context);
+ } else {
+ ALOGE("failed to call linkToDeath on client binder, status: %d", static_cast<int>(status));
+ }
+}
+
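+// Static binder-death callback registered via AIBinder_DeathRecipient_new. The cookie is the
+// OnBinderDiedContext created in monitorBinderLifeCycle for the dead client.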
+void DefaultVehicleHal::onBinderDied(void* cookie) {
+ OnBinderDiedContext* context = reinterpret_cast<OnBinderDiedContext*>(cookie);
+ context->vhal->onBinderDiedWithContext(context->clientId);
+}
+
+void DefaultVehicleHal::onBinderDiedWithContext(const AIBinder* clientId) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mSetValuesClients.erase(clientId);
+ mGetValuesClients.erase(clientId);
+ mSubscriptionClients->removeClient(clientId);
+ mSubscriptionManager->unsubscribe(clientId);
+}
+
+void DefaultVehicleHal::onBinderUnlinked(void* cookie) {
+ // Delete the context associated with this cookie.
+ OnBinderDiedContext* context = reinterpret_cast<OnBinderDiedContext*>(cookie);
+ context->vhal->onBinderUnlinkedWithContext(context->clientId);
+}
+
+void DefaultVehicleHal::onBinderUnlinkedWithContext(const AIBinder* clientId) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mOnBinderDiedContexts.erase(clientId);
+}
+
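+// Explicit instantiations of getOrCreateClient for the client types used by DefaultVehicleHal.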
+template std::shared_ptr<DefaultVehicleHal::GetValuesClient>
+DefaultVehicleHal::getOrCreateClient<DefaultVehicleHal::GetValuesClient>(
+ std::unordered_map<const AIBinder*, std::shared_ptr<GetValuesClient>>* clients,
+ const CallbackType& callback, std::shared_ptr<PendingRequestPool> pendingRequestPool);
+template std::shared_ptr<DefaultVehicleHal::SetValuesClient>
+DefaultVehicleHal::getOrCreateClient<DefaultVehicleHal::SetValuesClient>(
+ std::unordered_map<const AIBinder*, std::shared_ptr<SetValuesClient>>* clients,
+ const CallbackType& callback, std::shared_ptr<PendingRequestPool> pendingRequestPool);
+template std::shared_ptr<SubscriptionClient>
+DefaultVehicleHal::getOrCreateClient<SubscriptionClient>(
+ std::unordered_map<const AIBinder*, std::shared_ptr<SubscriptionClient>>* clients,
+ const CallbackType& callback, std::shared_ptr<PendingRequestPool> pendingRequestPool);
+
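+// Used by SubscriptionManager to poll a continuous property: sends a getValue request to the
+// hardware and delivers the result through the subscription client's result callback.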
+void DefaultVehicleHal::getValueFromHardwareCallCallback(
+ std::weak_ptr<IVehicleHardware> vehicleHardware,
+ std::shared_ptr<SubscribeIdByClient> subscribeIdByClient,
+ std::shared_ptr<SubscriptionClients> subscriptionClients, const CallbackType& callback,
+ const VehiclePropValue& value) {
+ int64_t subscribeId = subscribeIdByClient->getId(callback);
+ auto client = subscriptionClients->getClient(callback);
+ if (auto addRequestResult = client->addRequests({subscribeId}); !addRequestResult.ok()) {
+ ALOGE("subscribe[%" PRId64 "]: too many pending requests, ignore the getValue request",
+ subscribeId);
+ return;
+ }
+
+ std::vector<GetValueRequest> hardwareRequests = {{
+ .requestId = subscribeId,
+ .prop = value,
+ }};
+
+ std::shared_ptr<IVehicleHardware> hardware = vehicleHardware.lock();
+ if (hardware == nullptr) {
+ ALOGW("the IVehicleHardware is destroyed, DefaultVehicleHal is ending");
+ return;
+ }
+ if (StatusCode status = hardware->getValues(client->getResultCallback(), hardwareRequests);
+ status != StatusCode::OK) {
+ // If the hardware returns an error, finish all the pending requests for this call because the
+ // hardware will never invoke the callback for them.
+ client->tryFinishRequests({subscribeId});
+ ALOGE("subscribe[%" PRId64 "]: failed to get value from VehicleHardware, code: %d",
+ subscribeId, toInt(status));
+ }
}
void DefaultVehicleHal::setTimeout(int64_t timeoutInNano) {
@@ -92,54 +297,43 @@
return ScopedAStatus::ok();
}
-template <class T>
-std::shared_ptr<T> DefaultVehicleHal::getOrCreateClient(
- std::unordered_map<CallbackType, std::shared_ptr<T>>* clients,
- const CallbackType& callback) {
- if (clients->find(callback) == clients->end()) {
- // TODO(b/204943359): Remove client from clients when linkToDeath is implemented.
- (*clients)[callback] = std::make_shared<T>(mPendingRequestPool, callback);
- }
- return (*clients)[callback];
-}
-
-template std::shared_ptr<DefaultVehicleHal::GetValuesClient>
-DefaultVehicleHal::getOrCreateClient<DefaultVehicleHal::GetValuesClient>(
- std::unordered_map<CallbackType, std::shared_ptr<GetValuesClient>>* clients,
- const CallbackType& callback);
-
-template std::shared_ptr<DefaultVehicleHal::SetValuesClient>
-DefaultVehicleHal::getOrCreateClient<DefaultVehicleHal::SetValuesClient>(
- std::unordered_map<CallbackType, std::shared_ptr<SetValuesClient>>* clients,
- const CallbackType& callback);
-
-Result<void> DefaultVehicleHal::checkProperty(const VehiclePropValue& propValue) {
- int32_t propId = propValue.prop;
+Result<const VehiclePropConfig*> DefaultVehicleHal::getConfig(int32_t propId) const {
auto it = mConfigsByPropId.find(propId);
if (it == mConfigsByPropId.end()) {
return Error() << "no config for property, ID: " << propId;
}
- const VehiclePropConfig& config = it->second;
- const VehicleAreaConfig* areaConfig = getAreaConfig(propValue, config);
+ return &(it->second);
+}
+
+Result<void> DefaultVehicleHal::checkProperty(const VehiclePropValue& propValue) {
+ int32_t propId = propValue.prop;
+ auto result = getConfig(propId);
+ if (!result.ok()) {
+ return result.error();
+ }
+ const VehiclePropConfig* config = result.value();
+ const VehicleAreaConfig* areaConfig = getAreaConfig(propValue, *config);
if (!isGlobalProp(propId) && areaConfig == nullptr) {
// Ignore areaId for a global property. For a non-global property, check whether the areaId is
// allowed; the areaId must appear in areaConfig.
return Error() << "invalid area ID: " << propValue.areaId << " for prop ID: " << propId
<< ", not listed in config";
}
- if (auto result = checkPropValue(propValue, &config); !result.ok()) {
+ if (auto result = checkPropValue(propValue, config); !result.ok()) {
return Error() << "invalid property value: " << propValue.toString()
- << ", error: " << result.error().message();
+ << ", error: " << getErrorMsg(result);
}
if (auto result = checkValueRange(propValue, areaConfig); !result.ok()) {
return Error() << "property value out of range: " << propValue.toString()
- << ", error: " << result.error().message();
+ << ", error: " << getErrorMsg(result);
}
return {};
}
ScopedAStatus DefaultVehicleHal::getValues(const CallbackType& callback,
const GetValueRequests& requests) {
+ monitorBinderLifeCycle(callback);
+
expected<LargeParcelableBase::BorrowedOwnedObject<GetValueRequests>, ScopedAStatus>
deserializedResults = fromStableLargeParcelable(requests);
if (!deserializedResults.ok()) {
@@ -151,32 +345,63 @@
auto maybeRequestIds = checkDuplicateRequests(getValueRequests);
if (!maybeRequestIds.ok()) {
- ALOGE("duplicate request ID");
+ ALOGE("getValues: duplicate request ID");
return toScopedAStatus(maybeRequestIds, StatusCode::INVALID_ARG);
}
+
+ // A list of failed results that we already know about before sending requests to the hardware.
+ std::vector<GetValueResult> failedResults;
+ // The list of requests that we would send to hardware.
+ std::vector<GetValueRequest> hardwareRequests;
+
+ for (const auto& request : getValueRequests) {
+ if (auto result = checkReadPermission(request.prop); !result.ok()) {
+ ALOGW("property does not support reading: %s", getErrorMsg(result).c_str());
+ failedResults.push_back(GetValueResult{
+ .requestId = request.requestId,
+ .status = getErrorCode(result),
+ .prop = {},
+ });
+ } else {
+ hardwareRequests.push_back(request);
+ }
+ }
+
// The set of request Ids that we would send to hardware.
- std::unordered_set<int64_t> hardwareRequestIds(maybeRequestIds.value().begin(),
- maybeRequestIds.value().end());
+ std::unordered_set<int64_t> hardwareRequestIds;
+ for (const auto& request : hardwareRequests) {
+ hardwareRequestIds.insert(request.requestId);
+ }
std::shared_ptr<GetValuesClient> client;
{
std::scoped_lock<std::mutex> lockGuard(mLock);
- client = getOrCreateClient(&mGetValuesClients, callback);
+ client = getOrCreateClient(&mGetValuesClients, callback, mPendingRequestPool);
}
// Register the pending hardware requests and also check for duplicate request Ids.
if (auto addRequestResult = client->addRequests(hardwareRequestIds); !addRequestResult.ok()) {
- ALOGE("failed to add pending requests, error: %s",
- addRequestResult.error().message().c_str());
+ ALOGE("getValues[%s]: failed to add pending requests, error: %s",
+ toString(hardwareRequestIds).c_str(), getErrorMsg(addRequestResult).c_str());
return toScopedAStatus(addRequestResult);
}
+ if (!failedResults.empty()) {
+ // First send the failed results we already know back to the client.
+ client->sendResults(failedResults);
+ }
+
+ if (hardwareRequests.empty()) {
+ return ScopedAStatus::ok();
+ }
+
if (StatusCode status =
- mVehicleHardware->getValues(client->getResultCallback(), getValueRequests);
+ mVehicleHardware->getValues(client->getResultCallback(), hardwareRequests);
status != StatusCode::OK) {
// If the hardware returns error, finish all the pending requests for this request because
// we never expect hardware to call callback for these requests.
client->tryFinishRequests(hardwareRequestIds);
- ALOGE("failed to get value from VehicleHardware, status: %d", toInt(status));
+ ALOGE("getValues[%s]: failed to get value from VehicleHardware, status: %d",
+ toString(hardwareRequestIds).c_str(), toInt(status));
return ScopedAStatus::fromServiceSpecificErrorWithMessage(
toInt(status), "failed to get value from VehicleHardware");
}
@@ -185,6 +410,8 @@
ScopedAStatus DefaultVehicleHal::setValues(const CallbackType& callback,
const SetValueRequests& requests) {
+ monitorBinderLifeCycle(callback);
+
expected<LargeParcelableBase::BorrowedOwnedObject<SetValueRequests>, ScopedAStatus>
deserializedResults = fromStableLargeParcelable(requests);
if (!deserializedResults.ok()) {
@@ -201,14 +428,23 @@
auto maybeRequestIds = checkDuplicateRequests(setValueRequests);
if (!maybeRequestIds.ok()) {
- ALOGE("duplicate request ID");
+ ALOGE("setValues: duplicate request ID");
return toScopedAStatus(maybeRequestIds, StatusCode::INVALID_ARG);
}
for (auto& request : setValueRequests) {
int64_t requestId = request.requestId;
+ if (auto result = checkWritePermission(request.value); !result.ok()) {
+ ALOGW("property does not support writing: %s", getErrorMsg(result).c_str());
+ failedResults.push_back(SetValueResult{
+ .requestId = requestId,
+ .status = getErrorCode(result),
+ });
+ continue;
+ }
if (auto result = checkProperty(request.value); !result.ok()) {
- ALOGW("property not valid: %s", result.error().message().c_str());
+ ALOGW("setValues[%" PRId64 "]: property is not valid: %s", requestId,
+ getErrorMsg(result).c_str());
failedResults.push_back(SetValueResult{
.requestId = requestId,
.status = StatusCode::INVALID_ARG,
@@ -228,13 +464,13 @@
std::shared_ptr<SetValuesClient> client;
{
std::scoped_lock<std::mutex> lockGuard(mLock);
- client = getOrCreateClient(&mSetValuesClients, callback);
+ client = getOrCreateClient(&mSetValuesClients, callback, mPendingRequestPool);
}
// Register the pending hardware requests and also check for duplicate request Ids.
if (auto addRequestResult = client->addRequests(hardwareRequestIds); !addRequestResult.ok()) {
- ALOGE("failed to add pending requests, error: %s",
- addRequestResult.error().message().c_str());
+ ALOGE("setValues[%s], failed to add pending requests, error: %s",
+ toString(hardwareRequestIds).c_str(), getErrorMsg(addRequestResult).c_str());
return toScopedAStatus(addRequestResult, StatusCode::INVALID_ARG);
}
@@ -243,13 +479,18 @@
client->sendResults(failedResults);
}
+ if (hardwareRequests.empty()) {
+ return ScopedAStatus::ok();
+ }
+
if (StatusCode status =
mVehicleHardware->setValues(client->getResultCallback(), hardwareRequests);
status != StatusCode::OK) {
// If the hardware returns error, finish all the pending requests for this request because
// we never expect hardware to call callback for these requests.
client->tryFinishRequests(hardwareRequestIds);
- ALOGE("failed to set value to VehicleHardware, status: %d", toInt(status));
+ ALOGE("setValues[%s], failed to set value to VehicleHardware, status: %d",
+ toString(hardwareRequestIds).c_str(), toInt(status));
return ScopedAStatus::fromServiceSpecificErrorWithMessage(
toInt(status), "failed to set value to VehicleHardware");
}
@@ -296,15 +537,112 @@
return vectorToStableLargeParcelable(std::move(configs), output);
}
-ScopedAStatus DefaultVehicleHal::subscribe(const CallbackType&,
- const std::vector<SubscribeOptions>&, int32_t) {
- // TODO(b/200737967): implement this.
+Result<void> DefaultVehicleHal::checkSubscribeOptions(
+ const std::vector<SubscribeOptions>& options) {
+ for (const auto& option : options) {
+ int32_t propId = option.propId;
+ if (mConfigsByPropId.find(propId) == mConfigsByPropId.end()) {
+ return Error(toInt(StatusCode::INVALID_ARG))
+ << StringPrintf("no config for property, ID: %" PRId32, propId);
+ }
+ const VehiclePropConfig& config = mConfigsByPropId[propId];
+
+ if (config.changeMode != VehiclePropertyChangeMode::ON_CHANGE &&
+ config.changeMode != VehiclePropertyChangeMode::CONTINUOUS) {
+ return Error(toInt(StatusCode::INVALID_ARG))
+ << "only support subscribing to ON_CHANGE or CONTINUOUS property";
+ }
+
+ if (config.access != VehiclePropertyAccess::READ &&
+ config.access != VehiclePropertyAccess::READ_WRITE) {
+ return Error(toInt(StatusCode::ACCESS_DENIED))
+ << StringPrintf("Property %" PRId32 " has no read access", propId);
+ }
+
+ if (config.changeMode == VehiclePropertyChangeMode::CONTINUOUS) {
+ float sampleRate = option.sampleRate;
+ float minSampleRate = config.minSampleRate;
+ float maxSampleRate = config.maxSampleRate;
+ if (sampleRate < minSampleRate || sampleRate > maxSampleRate) {
+ return Error(toInt(StatusCode::INVALID_ARG))
+ << StringPrintf("sample rate: %f out of range, must be within %f and %f",
+ sampleRate, minSampleRate, maxSampleRate);
+ }
+ if (!SubscriptionManager::checkSampleRate(sampleRate)) {
+ return Error(toInt(StatusCode::INVALID_ARG))
+ << "invalid sample rate: " << sampleRate;
+ }
+ }
+
+ if (isGlobalProp(propId)) {
+ continue;
+ }
+
+ // Non-global property.
+ for (int32_t areaId : option.areaIds) {
+ if (auto areaConfig = getAreaConfig(propId, areaId, config); areaConfig == nullptr) {
+ return Error(toInt(StatusCode::INVALID_ARG))
+ << StringPrintf("invalid area ID: %" PRId32 " for prop ID: %" PRId32
+ ", not listed in config",
+ areaId, propId);
+ }
+ }
+ }
+ return {};
+}
+
+ScopedAStatus DefaultVehicleHal::subscribe(const CallbackType& callback,
+ const std::vector<SubscribeOptions>& options,
+ [[maybe_unused]] int32_t maxSharedMemoryFileCount) {
+ monitorBinderLifeCycle(callback);
+
+ // TODO(b/205189110): Use shared memory file count.
+ if (auto result = checkSubscribeOptions(options); !result.ok()) {
+ ALOGE("subscribe: invalid subscribe options: %s", getErrorMsg(result).c_str());
+ return toScopedAStatus(result);
+ }
+
+ std::vector<SubscribeOptions> onChangeSubscriptions;
+ std::vector<SubscribeOptions> continuousSubscriptions;
+ for (const auto& option : options) {
+ int32_t propId = option.propId;
+ // We have already validated that the config exists.
+ const VehiclePropConfig& config = mConfigsByPropId[propId];
+
+ SubscribeOptions optionCopy = option;
+ // If areaIds is empty, subscribe to all areas.
+ if (optionCopy.areaIds.empty() && !isGlobalProp(propId)) {
+ for (const auto& areaConfig : config.areaConfigs) {
+ optionCopy.areaIds.push_back(areaConfig.areaId);
+ }
+ }
+
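+ // Global properties are subscribed with the single areaId 0.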
+ if (isGlobalProp(propId)) {
+ optionCopy.areaIds = {0};
+ }
+
+ if (config.changeMode == VehiclePropertyChangeMode::CONTINUOUS) {
+ continuousSubscriptions.push_back(std::move(optionCopy));
+ } else {
+ onChangeSubscriptions.push_back(std::move(optionCopy));
+ }
+ }
+ // Since we have already checked the sample rates, the following calls must succeed.
+ if (!onChangeSubscriptions.empty()) {
+ mSubscriptionManager->subscribe(callback, onChangeSubscriptions,
+ /*isContinuousProperty=*/false);
+ }
+ if (!continuousSubscriptions.empty()) {
+ mSubscriptionManager->subscribe(callback, continuousSubscriptions,
+ /*isContinuousProperty=*/true);
+ }
return ScopedAStatus::ok();
}
-ScopedAStatus DefaultVehicleHal::unsubscribe(const CallbackType&, const std::vector<int32_t>&) {
- // TODO(b/200737967): implement this.
- return ScopedAStatus::ok();
+ScopedAStatus DefaultVehicleHal::unsubscribe(const CallbackType& callback,
+ const std::vector<int32_t>& propIds) {
+ return toScopedAStatus(mSubscriptionManager->unsubscribe(callback->asBinder().get(), propIds),
+ StatusCode::INVALID_ARG);
}
ScopedAStatus DefaultVehicleHal::returnSharedMemory(const CallbackType&, int64_t) {
@@ -316,6 +654,70 @@
return mVehicleHardware.get();
}
+Result<void> DefaultVehicleHal::checkWritePermission(const VehiclePropValue& value) const {
+ int32_t propId = value.prop;
+ auto result = getConfig(propId);
+ if (!result.ok()) {
+ return Error(toInt(StatusCode::INVALID_ARG)) << getErrorMsg(result);
+ }
+ const VehiclePropConfig* config = result.value();
+
+ if (config->access != VehiclePropertyAccess::WRITE &&
+ config->access != VehiclePropertyAccess::READ_WRITE) {
+ return Error(toInt(StatusCode::ACCESS_DENIED))
+ << StringPrintf("Property %" PRId32 " has no write access", propId);
+ }
+ return {};
+}
+
+Result<void> DefaultVehicleHal::checkReadPermission(const VehiclePropValue& value) const {
+ int32_t propId = value.prop;
+ auto result = getConfig(propId);
+ if (!result.ok()) {
+ return Error(toInt(StatusCode::INVALID_ARG)) << getErrorMsg(result);
+ }
+ const VehiclePropConfig* config = result.value();
+
+ if (config->access != VehiclePropertyAccess::READ &&
+ config->access != VehiclePropertyAccess::READ_WRITE) {
+ return Error(toInt(StatusCode::ACCESS_DENIED))
+ << StringPrintf("Property %" PRId32 " has no read access", propId);
+ }
+ return {};
+}
+
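+// Recurrent heartbeat callback: checks the hardware health and, if healthy, publishes a
+// VHAL_HEARTBEAT property event to subscribed clients.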
+void DefaultVehicleHal::checkHealth(std::weak_ptr<IVehicleHardware> hardware,
+ std::weak_ptr<SubscriptionManager> subscriptionManager) {
+ auto hardwarePtr = hardware.lock();
+ if (hardwarePtr == nullptr) {
+ ALOGW("the VehicleHardware is destroyed, DefaultVehicleHal is ending");
+ return;
+ }
+
+ StatusCode status = hardwarePtr->checkHealth();
+ if (status != StatusCode::OK) {
+ ALOGE("VHAL check health returns non-okay status");
+ return;
+ }
+ std::vector<VehiclePropValue> values = {{
+ .prop = toInt(VehicleProperty::VHAL_HEARTBEAT),
+ .areaId = 0,
+ .status = VehiclePropertyStatus::AVAILABLE,
+ .value.int64Values = {uptimeMillis()},
+ }};
+ onPropertyChangeEvent(subscriptionManager, values);
+ return;
+}
+
+binder_status_t DefaultVehicleHal::AIBinderLinkToDeathImpl::linkToDeath(
+ AIBinder* binder, AIBinder_DeathRecipient* recipient, void* cookie) {
+ return AIBinder_linkToDeath(binder, recipient, cookie);
+}
+
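+// Replaces the linkToDeath implementation; used by tests to inject a fake implementation.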
+void DefaultVehicleHal::setLinkToDeathImpl(std::unique_ptr<ILinkToDeath> impl) {
+ mLinkToDeathImpl = std::move(impl);
+}
+
} // namespace vehicle
} // namespace automotive
} // namespace hardware
diff --git a/automotive/vehicle/aidl/impl/vhal/src/RecurrentTimer.cpp b/automotive/vehicle/aidl/impl/vhal/src/RecurrentTimer.cpp
new file mode 100644
index 0000000..8521c4d
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/src/RecurrentTimer.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RecurrentTimer.h"
+
+#include <utils/Log.h>
+#include <utils/SystemClock.h>
+
+#include <inttypes.h>
+#include <math.h>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+using ::android::base::ScopedLockAssertion;
+
+RecurrentTimer::RecurrentTimer() : mThread(&RecurrentTimer::loop, this) {}
+
+RecurrentTimer::~RecurrentTimer() {
+ {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mStopRequested = true;
+ }
+ mCond.notify_one();
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+void RecurrentTimer::registerTimerCallback(int64_t intervalInNano,
+ std::shared_ptr<RecurrentTimer::Callback> callback) {
+ {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
+ // Align nextTime to the next multiple of the interval. Use floating-point division so the
+ // ceiling is not lost to integer truncation.
+ int64_t nextTime = static_cast<int64_t>(ceil(static_cast<double>(elapsedRealtimeNano()) /
+                                               static_cast<double>(intervalInNano))) *
+                    intervalInNano;
+
+ std::unique_ptr<CallbackInfo> info = std::make_unique<CallbackInfo>();
+ info->callback = callback;
+ info->interval = intervalInNano;
+ info->nextTime = nextTime;
+
+ auto it = mCallbacks.find(callback);
+ if (it != mCallbacks.end()) {
+ ALOGI("Replacing an existing timer callback with a new interval, current: %" PRId64
+ " ns, new: %" PRId64 " ns",
+ it->second->interval, intervalInNano);
+ markOutdatedLocked(it->second);
+ }
+ mCallbacks[callback] = info.get();
+ mCallbackQueue.push_back(std::move(info));
+ // Insert the last element into the heap.
+ std::push_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);
+ }
+ mCond.notify_one();
+}
+
+void RecurrentTimer::unregisterTimerCallback(std::shared_ptr<RecurrentTimer::Callback> callback) {
+ {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
+ auto it = mCallbacks.find(callback);
+ if (it == mCallbacks.end()) {
+ ALOGE("No event found to unregister");
+ return;
+ }
+
+ markOutdatedLocked(it->second);
+ mCallbacks.erase(it);
+ }
+
+ mCond.notify_one();
+}
+
+void RecurrentTimer::markOutdatedLocked(RecurrentTimer::CallbackInfo* info) {
+ info->outdated = true;
+ info->callback = nullptr;
+ // Make sure the first element is always valid.
+ removeInvalidCallbackLocked();
+}
+
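+// Pops outdated entries off the front of the heap so that the front element, if any, is always a
+// live callback.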
+void RecurrentTimer::removeInvalidCallbackLocked() {
+ while (mCallbackQueue.size() != 0 && mCallbackQueue[0]->outdated) {
+ std::pop_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);
+ mCallbackQueue.pop_back();
+ }
+}
+
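+// Removes and returns the callback with the earliest nextTime from the heap.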
+std::unique_ptr<RecurrentTimer::CallbackInfo> RecurrentTimer::popNextCallbackLocked() {
+ std::pop_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);
+ std::unique_ptr<CallbackInfo> info = std::move(mCallbackQueue[mCallbackQueue.size() - 1]);
+ mCallbackQueue.pop_back();
+ // Make sure the first element is always valid.
+ removeInvalidCallbackLocked();
+ return info;
+}
+
+void RecurrentTimer::loop() {
+ std::unique_lock<std::mutex> uniqueLock(mLock);
+
+ while (true) {
+ // Wait until the timer exits or we have at least one recurrent callback.
+ mCond.wait(uniqueLock, [this] {
+ ScopedLockAssertion lockAssertion(mLock);
+ return mStopRequested || mCallbackQueue.size() != 0;
+ });
+
+ int64_t interval;
+ {
+ ScopedLockAssertion lockAssertion(mLock);
+ if (mStopRequested) {
+ return;
+ }
+ // The first element is the nearest next event.
+ int64_t nextTime = mCallbackQueue[0]->nextTime;
+ int64_t now = elapsedRealtimeNano();
+ if (nextTime > now) {
+ interval = nextTime - now;
+ } else {
+ interval = 0;
+ }
+ }
+
+ // Wait until the next event is due or the timer is stopped.
+ if (mCond.wait_for(uniqueLock, std::chrono::nanoseconds(interval), [this] {
+ ScopedLockAssertion lockAssertion(mLock);
+ return mStopRequested;
+ })) {
+ return;
+ }
+
+ {
+ ScopedLockAssertion lockAssertion(mLock);
+ int64_t now = elapsedRealtimeNano();
+ while (mCallbackQueue.size() > 0) {
+ int64_t nextTime = mCallbackQueue[0]->nextTime;
+ if (nextTime > now) {
+ break;
+ }
+
+ std::unique_ptr<CallbackInfo> info = popNextCallbackLocked();
+ info->nextTime += info->interval;
+
+ auto callback = info->callback;
+ mCallbackQueue.push_back(std::move(info));
+ std::push_heap(mCallbackQueue.begin(), mCallbackQueue.end(), CallbackInfo::cmp);
+
+ (*callback)();
+ }
+ }
+ }
+}
+
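+// Heap comparator: together with std::push_heap/std::pop_heap this keeps the callback with the
+// smallest nextTime at the front of mCallbackQueue.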
+bool RecurrentTimer::CallbackInfo::cmp(const std::unique_ptr<RecurrentTimer::CallbackInfo>& lhs,
+ const std::unique_ptr<RecurrentTimer::CallbackInfo>& rhs) {
+ return lhs->nextTime > rhs->nextTime;
+}
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/vehicle/aidl/impl/vhal/src/SubscriptionManager.cpp b/automotive/vehicle/aidl/impl/vhal/src/SubscriptionManager.cpp
new file mode 100644
index 0000000..21bfba6
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/src/SubscriptionManager.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SubscriptionManager.h"
+
+#include <math/HashCombine.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+namespace {
+
+constexpr float ONE_SECOND_IN_NANO = 1'000'000'000.;
+
+} // namespace
+
+using ::aidl::android::hardware::automotive::vehicle::IVehicleCallback;
+using ::aidl::android::hardware::automotive::vehicle::SubscribeOptions;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropValue;
+using ::android::base::Error;
+using ::android::base::Result;
+using ::ndk::ScopedAStatus;
+
+bool SubscriptionManager::PropIdAreaId::operator==(const PropIdAreaId& other) const {
+ return areaId == other.areaId && propId == other.propId;
+}
+
+size_t SubscriptionManager::PropIdAreaIdHash::operator()(PropIdAreaId const& propIdAreaId) const {
+ size_t res = 0;
+ hashCombine(res, propIdAreaId.propId);
+ hashCombine(res, propIdAreaId.areaId);
+ return res;
+}
+
+SubscriptionManager::SubscriptionManager(GetValueFunc&& action)
+ : mTimer(std::make_shared<RecurrentTimer>()), mGetValue(std::move(action)) {}
+
+SubscriptionManager::~SubscriptionManager() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
+ mClientsByPropIdArea.clear();
+ // RecurrentSubscription holds a reference to mGetValue, so it must be destroyed before
+ // mGetValue is destroyed.
+ mSubscriptionsByClient.clear();
+}
+
+bool SubscriptionManager::checkSampleRate(float sampleRate) {
+ return getInterval(sampleRate).ok();
+}
+
+Result<int64_t> SubscriptionManager::getInterval(float sampleRate) {
+ int64_t interval = 0;
+ if (sampleRate <= 0) {
+ return Error() << "invalid sample rate, must be a positive number";
+ }
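+ // A sample rate this small would produce an interval larger than what fits in int64_t.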
+ if (sampleRate <= (ONE_SECOND_IN_NANO / static_cast<float>(INT64_MAX))) {
+ return Error() << "invalid sample rate: " << sampleRate << ", too small";
+ }
+ interval = static_cast<int64_t>(ONE_SECOND_IN_NANO / sampleRate);
+ return interval;
+}
+
+Result<void> SubscriptionManager::subscribe(const std::shared_ptr<IVehicleCallback>& callback,
+ const std::vector<SubscribeOptions>& options,
+ bool isContinuousProperty) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
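+ // First pass: validate every option and pre-compute the polling intervals so that no
+ // subscription state changes if any option is invalid.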
+ std::vector<int64_t> intervals;
+ for (const auto& option : options) {
+ float sampleRate = option.sampleRate;
+
+ if (isContinuousProperty) {
+ auto intervalResult = getInterval(sampleRate);
+ if (!intervalResult.ok()) {
+ return intervalResult.error();
+ }
+ intervals.push_back(intervalResult.value());
+ }
+
+ if (option.areaIds.empty()) {
+ ALOGE("area IDs to subscribe must not be empty");
+ return Error() << "area IDs to subscribe must not be empty";
+ }
+ }
+
+ size_t intervalIndex = 0;
+ ClientIdType clientId = callback->asBinder().get();
+ for (const auto& option : options) {
+ int32_t propId = option.propId;
+ const std::vector<int32_t>& areaIds = option.areaIds;
+ int64_t interval = 0;
+ if (isContinuousProperty) {
+ interval = intervals[intervalIndex];
+ intervalIndex++;
+ }
+ for (int32_t areaId : areaIds) {
+ PropIdAreaId propIdAreaId = {
+ .propId = propId,
+ .areaId = areaId,
+ };
+ if (isContinuousProperty) {
+ VehiclePropValue propValueRequest{
+ .prop = propId,
+ .areaId = areaId,
+ };
+ mSubscriptionsByClient[clientId][propIdAreaId] =
+ std::make_unique<RecurrentSubscription>(
+ mTimer,
+ [this, callback, propValueRequest] {
+ mGetValue(callback, propValueRequest);
+ },
+ interval);
+ } else {
+ mSubscriptionsByClient[clientId][propIdAreaId] =
+ std::make_unique<OnChangeSubscription>();
+ }
+ mClientsByPropIdArea[propIdAreaId][clientId] = callback;
+ }
+ }
+ return {};
+}
+
+Result<void> SubscriptionManager::unsubscribe(SubscriptionManager::ClientIdType clientId,
+ const std::vector<int32_t>& propIds) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
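+ // Verify that the client exists and that every requested property is currently subscribed
+ // before removing anything.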
+ if (mSubscriptionsByClient.find(clientId) == mSubscriptionsByClient.end()) {
+ return Error() << "No property was subscribed for the callback";
+ }
+ std::unordered_set<int32_t> subscribedPropIds;
+ for (auto const& [propIdAreaId, _] : mSubscriptionsByClient[clientId]) {
+ subscribedPropIds.insert(propIdAreaId.propId);
+ }
+
+ for (int32_t propId : propIds) {
+ if (subscribedPropIds.find(propId) == subscribedPropIds.end()) {
+ return Error() << "property ID: " << propId << " is not subscribed";
+ }
+ }
+
+ auto& subscriptions = mSubscriptionsByClient[clientId];
+ auto it = subscriptions.begin();
+ while (it != subscriptions.end()) {
+ int32_t propId = it->first.propId;
+ if (std::find(propIds.begin(), propIds.end(), propId) != propIds.end()) {
+ auto& clients = mClientsByPropIdArea[it->first];
+ clients.erase(clientId);
+ if (clients.empty()) {
+ mClientsByPropIdArea.erase(it->first);
+ }
+ it = subscriptions.erase(it);
+ } else {
+ it++;
+ }
+ }
+ if (subscriptions.empty()) {
+ mSubscriptionsByClient.erase(clientId);
+ }
+ return {};
+}
+
+Result<void> SubscriptionManager::unsubscribe(SubscriptionManager::ClientIdType clientId) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
+ if (mSubscriptionsByClient.find(clientId) == mSubscriptionsByClient.end()) {
+ return Error() << "No property was subscribed for this client";
+ }
+
+ auto& subscriptions = mSubscriptionsByClient[clientId];
+ for (auto const& [propIdAreaId, _] : subscriptions) {
+ auto& clients = mClientsByPropIdArea[propIdAreaId];
+ clients.erase(clientId);
+ if (clients.empty()) {
+ mClientsByPropIdArea.erase(propIdAreaId);
+ }
+ }
+ mSubscriptionsByClient.erase(clientId);
+ return {};
+}
+
+std::unordered_map<std::shared_ptr<IVehicleCallback>, std::vector<const VehiclePropValue*>>
+SubscriptionManager::getSubscribedClients(const std::vector<VehiclePropValue>& updatedValues) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ std::unordered_map<std::shared_ptr<IVehicleCallback>, std::vector<const VehiclePropValue*>>
+ clients;
+
+ for (const auto& value : updatedValues) {
+ PropIdAreaId propIdAreaId{
+ .propId = value.prop,
+ .areaId = value.areaId,
+ };
+ if (mClientsByPropIdArea.find(propIdAreaId) == mClientsByPropIdArea.end()) {
+ continue;
+ }
+ for (const auto& [clientId, client] : mClientsByPropIdArea[propIdAreaId]) {
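+ // Only on-change subscriptions receive pushed property events; continuous subscriptions are
+ // served by the recurrent timer polling getValue instead.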
+ if (!mSubscriptionsByClient[clientId][propIdAreaId]->isOnChange()) {
+ continue;
+ }
+ clients[client].push_back(&value);
+ }
+ }
+ return clients;
+}
+
+bool SubscriptionManager::isEmpty() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return mSubscriptionsByClient.empty() && mClientsByPropIdArea.empty();
+}
+
+SubscriptionManager::RecurrentSubscription::RecurrentSubscription(
+ std::shared_ptr<RecurrentTimer> timer, std::function<void()>&& action, int64_t interval)
+ : mAction(std::make_shared<std::function<void()>>(action)), mTimer(timer) {
+ mTimer->registerTimerCallback(interval, mAction);
+}
+
+SubscriptionManager::RecurrentSubscription::~RecurrentSubscription() {
+ mTimer->unregisterTimerCallback(mAction);
+}
+
+bool SubscriptionManager::RecurrentSubscription::isOnChange() {
+ return false;
+}
+
+bool SubscriptionManager::OnChangeSubscription::isOnChange() {
+ return true;
+}
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/vehicle/aidl/impl/vhal/test/DefaultVehicleHalTest.cpp b/automotive/vehicle/aidl/impl/vhal/test/DefaultVehicleHalTest.cpp
index 6970e48..ff355c3 100644
--- a/automotive/vehicle/aidl/impl/vhal/test/DefaultVehicleHalTest.cpp
+++ b/automotive/vehicle/aidl/impl/vhal/test/DefaultVehicleHalTest.cpp
@@ -14,8 +14,10 @@
* limitations under the License.
*/
+#include "ConnectedClient.h"
#include "DefaultVehicleHal.h"
#include "MockVehicleCallback.h"
+#include "MockVehicleHardware.h"
#include <IVehicleHardware.h>
#include <LargeParcelableBase.h>
@@ -26,6 +28,7 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <utils/Log.h>
+#include <utils/SystemClock.h>
#include <chrono>
#include <list>
@@ -54,10 +57,14 @@
using ::aidl::android::hardware::automotive::vehicle::SetValueResult;
using ::aidl::android::hardware::automotive::vehicle::SetValueResults;
using ::aidl::android::hardware::automotive::vehicle::StatusCode;
+using ::aidl::android::hardware::automotive::vehicle::SubscribeOptions;
using ::aidl::android::hardware::automotive::vehicle::VehicleAreaWindow;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropConfig;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropConfigs;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropErrors;
+using ::aidl::android::hardware::automotive::vehicle::VehicleProperty;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropertyAccess;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropertyChangeMode;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropValue;
using ::aidl::android::hardware::automotive::vehicle::VehiclePropValues;
@@ -66,208 +73,34 @@
using ::ndk::ScopedAStatus;
using ::ndk::ScopedFileDescriptor;
+using ::ndk::SpAIBinder;
using ::testing::Eq;
+using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
using ::testing::WhenSortedBy;
constexpr int32_t INVALID_PROP_ID = 0;
// VehiclePropertyGroup:SYSTEM,VehicleArea:WINDOW,VehiclePropertyType:INT32
constexpr int32_t INT32_WINDOW_PROP = 10001 + 0x10000000 + 0x03000000 + 0x00400000;
-
-template <class T>
-std::optional<T> pop(std::list<T>& items) {
- if (items.size() > 0) {
- auto item = std::move(items.front());
- items.pop_front();
- return item;
- }
- return std::nullopt;
-}
+// VehiclePropertyGroup:SYSTEM,VehicleArea:GLOBAL,VehiclePropertyType:INT32
+constexpr int32_t GLOBAL_ON_CHANGE_PROP = 10002 + 0x10000000 + 0x01000000 + 0x00400000;
+// VehiclePropertyGroup:SYSTEM,VehicleArea:GLOBAL,VehiclePropertyType:INT32
+constexpr int32_t GLOBAL_CONTINUOUS_PROP = 10003 + 0x10000000 + 0x01000000 + 0x00400000;
+// VehiclePropertyGroup:SYSTEM,VehicleArea:WINDOW,VehiclePropertyType:INT32
+constexpr int32_t AREA_ON_CHANGE_PROP = 10004 + 0x10000000 + 0x03000000 + 0x00400000;
+// VehiclePropertyGroup:SYSTEM,VehicleArea:WINDOW,VehiclePropertyType:INT32
+constexpr int32_t AREA_CONTINUOUS_PROP = 10005 + 0x10000000 + 0x03000000 + 0x00400000;
+// VehiclePropertyGroup:SYSTEM,VehicleArea:GLOBAL,VehiclePropertyType:INT32
+constexpr int32_t READ_ONLY_PROP = 10006 + 0x10000000 + 0x01000000 + 0x00400000;
+// VehiclePropertyGroup:SYSTEM,VehicleArea:GLOBAL,VehiclePropertyType:INT32
+constexpr int32_t WRITE_ONLY_PROP = 10007 + 0x10000000 + 0x01000000 + 0x00400000;
int32_t testInt32VecProp(size_t i) {
// VehiclePropertyGroup:SYSTEM,VehicleArea:GLOBAL,VehiclePropertyType:INT32_VEC
return static_cast<int32_t>(i) + 0x10000000 + 0x01000000 + 0x00410000;
}
-class MockVehicleHardware final : public IVehicleHardware {
- public:
- ~MockVehicleHardware() {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- for (auto& thread : mThreads) {
- thread.join();
- }
- }
-
- std::vector<VehiclePropConfig> getAllPropertyConfigs() const override {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- return mPropertyConfigs;
- }
-
- StatusCode setValues(std::shared_ptr<const SetValuesCallback> callback,
- const std::vector<SetValueRequest>& requests) override {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- return handleRequests(__func__, callback, requests, &mSetValueRequests,
- &mSetValueResponses);
- }
-
- StatusCode getValues(std::shared_ptr<const GetValuesCallback> callback,
- const std::vector<GetValueRequest>& requests) const override {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- return handleRequests(__func__, callback, requests, &mGetValueRequests,
- &mGetValueResponses);
- }
-
- DumpResult dump(const std::vector<std::string>&) override {
- // TODO(b/200737967): mock this.
- return DumpResult{};
- }
-
- StatusCode checkHealth() override {
- // TODO(b/200737967): mock this.
- return StatusCode::OK;
- }
-
- void registerOnPropertyChangeEvent(std::unique_ptr<const PropertyChangeCallback>) override {
- // TODO(b/200737967): mock this.
- }
-
- void registerOnPropertySetErrorEvent(std::unique_ptr<const PropertySetErrorCallback>) override {
- // TODO(b/200737967): mock this.
- }
-
- // Test functions.
- void setPropertyConfigs(const std::vector<VehiclePropConfig>& configs) {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- mPropertyConfigs = configs;
- }
-
- void addGetValueResponses(const std::vector<GetValueResult>& responses) {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- mGetValueResponses.push_back(responses);
- }
-
- void addSetValueResponses(const std::vector<SetValueResult>& responses) {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- mSetValueResponses.push_back(responses);
- }
-
- std::vector<GetValueRequest> nextGetValueRequests() {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- std::optional<std::vector<GetValueRequest>> request = pop(mGetValueRequests);
- if (!request.has_value()) {
- return std::vector<GetValueRequest>();
- }
- return std::move(request.value());
- }
-
- std::vector<SetValueRequest> nextSetValueRequests() {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- std::optional<std::vector<SetValueRequest>> request = pop(mSetValueRequests);
- if (!request.has_value()) {
- return std::vector<SetValueRequest>();
- }
- return std::move(request.value());
- }
-
- void setStatus(const char* functionName, StatusCode status) {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- mStatusByFunctions[functionName] = status;
- }
-
- void setSleepTime(int64_t timeInNano) {
- std::scoped_lock<std::mutex> lockGuard(mLock);
- mSleepTime = timeInNano;
- }
-
- private:
- mutable std::mutex mLock;
- std::vector<VehiclePropConfig> mPropertyConfigs GUARDED_BY(mLock);
- mutable std::list<std::vector<GetValueRequest>> mGetValueRequests GUARDED_BY(mLock);
- mutable std::list<std::vector<GetValueResult>> mGetValueResponses GUARDED_BY(mLock);
- mutable std::list<std::vector<SetValueRequest>> mSetValueRequests GUARDED_BY(mLock);
- mutable std::list<std::vector<SetValueResult>> mSetValueResponses GUARDED_BY(mLock);
- std::unordered_map<const char*, StatusCode> mStatusByFunctions GUARDED_BY(mLock);
- int64_t mSleepTime GUARDED_BY(mLock) = 0;
- mutable std::vector<std::thread> mThreads GUARDED_BY(mLock);
-
- template <class ResultType>
- StatusCode returnResponse(
- std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
- std::list<std::vector<ResultType>>* storedResponses) const;
-
- template <class RequestType, class ResultType>
- StatusCode handleRequests(
- const char* functionName,
- std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
- const std::vector<RequestType>& requests,
- std::list<std::vector<RequestType>>* storedRequests,
- std::list<std::vector<ResultType>>* storedResponses) const REQUIRES(mLock);
-};
-
-template <class ResultType>
-StatusCode MockVehicleHardware::returnResponse(
- std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
- std::list<std::vector<ResultType>>* storedResponses) const {
- if (storedResponses->size() > 0) {
- (*callback)(std::move(storedResponses->front()));
- storedResponses->pop_front();
- return StatusCode::OK;
- } else {
- ALOGE("no more response");
- return StatusCode::INTERNAL_ERROR;
- }
-}
-
-template StatusCode MockVehicleHardware::returnResponse<GetValueResult>(
- std::shared_ptr<const std::function<void(std::vector<GetValueResult>)>> callback,
- std::list<std::vector<GetValueResult>>* storedResponses) const;
-
-template StatusCode MockVehicleHardware::returnResponse<SetValueResult>(
- std::shared_ptr<const std::function<void(std::vector<SetValueResult>)>> callback,
- std::list<std::vector<SetValueResult>>* storedResponses) const;
-
-template <class RequestType, class ResultType>
-StatusCode MockVehicleHardware::handleRequests(
- const char* functionName,
- std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
- const std::vector<RequestType>& requests,
- std::list<std::vector<RequestType>>* storedRequests,
- std::list<std::vector<ResultType>>* storedResponses) const {
- storedRequests->push_back(requests);
- if (auto it = mStatusByFunctions.find(functionName); it != mStatusByFunctions.end()) {
- if (StatusCode status = it->second; status != StatusCode::OK) {
- return status;
- }
- }
-
- if (mSleepTime != 0) {
- int64_t sleepTime = mSleepTime;
- mThreads.emplace_back([this, callback, sleepTime, storedResponses]() {
- std::this_thread::sleep_for(std::chrono::nanoseconds(sleepTime));
- returnResponse(callback, storedResponses);
- });
- return StatusCode::OK;
-
- } else {
- return returnResponse(callback, storedResponses);
- }
-}
-
-template StatusCode MockVehicleHardware::handleRequests<GetValueRequest, GetValueResult>(
- const char* functionName,
- std::shared_ptr<const std::function<void(std::vector<GetValueResult>)>> callback,
- const std::vector<GetValueRequest>& requests,
- std::list<std::vector<GetValueRequest>>* storedRequests,
- std::list<std::vector<GetValueResult>>* storedResponses) const;
-
-template StatusCode MockVehicleHardware::handleRequests<SetValueRequest, SetValueResult>(
- const char* functionName,
- std::shared_ptr<const std::function<void(std::vector<SetValueResult>)>> callback,
- const std::vector<SetValueRequest>& requests,
- std::list<std::vector<SetValueRequest>>* storedRequests,
- std::list<std::vector<SetValueResult>>* storedResponses) const;
-
struct PropConfigCmp {
bool operator()(const VehiclePropConfig& a, const VehiclePropConfig& b) const {
return (a.prop < b.prop);
@@ -320,6 +153,62 @@
.areaId = toInt(VehicleAreaWindow::ROW_1_RIGHT),
},
.expectedStatus = StatusCode::INVALID_ARG,
+ },
+ {
+ .name = "no_write_permission",
+ .request =
+ {
+ .prop = READ_ONLY_PROP,
+ .value.int32Values = {0},
+ },
+ .expectedStatus = StatusCode::ACCESS_DENIED,
+ }};
+}
+
+struct SubscribeInvalidOptionsTestCase {
+ std::string name;
+ SubscribeOptions option;
+};
+
+std::vector<SubscribeInvalidOptionsTestCase> getSubscribeInvalidOptionsTestCases() {
+ return {{
+ .name = "invalid_prop",
+ .option =
+ {
+ .propId = INVALID_PROP_ID,
+ },
+ },
+ {
+ .name = "invalid_area_ID",
+ .option =
+ {
+ .propId = AREA_ON_CHANGE_PROP,
+ .areaIds = {0},
+ },
+ },
+ {
+ .name = "invalid_sample_rate",
+ .option =
+ {
+ .propId = GLOBAL_CONTINUOUS_PROP,
+ .sampleRate = 0.0,
+ },
+ },
+ {
+ .name = "sample_rate_out_of_range",
+ .option =
+ {
+ .propId = GLOBAL_CONTINUOUS_PROP,
+ .sampleRate = 1000.0,
+ },
+ },
+ {
+ .name = "static_property",
+ .option =
+ {
+ // Default change mode is static.
+ .propId = testInt32VecProp(0),
+ },
}};
}
@@ -333,6 +222,7 @@
for (size_t i = 0; i < 10000; i++) {
testConfigs.push_back(VehiclePropConfig{
.prop = testInt32VecProp(i),
+ .access = VehiclePropertyAccess::READ_WRITE,
.areaConfigs =
{
{
@@ -343,19 +233,104 @@
},
});
}
+ // A property with area config.
testConfigs.push_back(
VehiclePropConfig{.prop = INT32_WINDOW_PROP,
+ .access = VehiclePropertyAccess::READ_WRITE,
.areaConfigs = {{
.areaId = toInt(VehicleAreaWindow::ROW_1_LEFT),
.minInt32Value = 0,
.maxInt32Value = 100,
}}});
+ // A global on-change property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = GLOBAL_ON_CHANGE_PROP,
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ });
+ // A global continuous property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = GLOBAL_CONTINUOUS_PROP,
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::CONTINUOUS,
+ .minSampleRate = 0.0,
+ .maxSampleRate = 100.0,
+ });
+ // A per-area on-change property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = AREA_ON_CHANGE_PROP,
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .areaConfigs =
+ {
+ {
+
+ .areaId = toInt(VehicleAreaWindow::ROW_1_LEFT),
+ .minInt32Value = 0,
+ .maxInt32Value = 100,
+ },
+ {
+ .areaId = toInt(VehicleAreaWindow::ROW_1_RIGHT),
+ .minInt32Value = 0,
+ .maxInt32Value = 100,
+ },
+ },
+ });
+ // A per-area continuous property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = AREA_CONTINUOUS_PROP,
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::CONTINUOUS,
+ .minSampleRate = 0.0,
+ .maxSampleRate = 1000.0,
+ .areaConfigs =
+ {
+ {
+
+ .areaId = toInt(VehicleAreaWindow::ROW_1_LEFT),
+ .minInt32Value = 0,
+ .maxInt32Value = 100,
+ },
+ {
+ .areaId = toInt(VehicleAreaWindow::ROW_1_RIGHT),
+ .minInt32Value = 0,
+ .maxInt32Value = 100,
+ },
+ },
+ });
+ // A read-only property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = READ_ONLY_PROP,
+ .access = VehiclePropertyAccess::READ,
+ .changeMode = VehiclePropertyChangeMode::CONTINUOUS,
+ .minSampleRate = 0.0,
+ .maxSampleRate = 1000.0,
+ });
+ // A write-only property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = WRITE_ONLY_PROP,
+ .access = VehiclePropertyAccess::WRITE,
+ .changeMode = VehiclePropertyChangeMode::CONTINUOUS,
+ .minSampleRate = 0.0,
+ .maxSampleRate = 1000.0,
+ });
+ // Register the heartbeat event property.
+ testConfigs.push_back(VehiclePropConfig{
+ .prop = toInt(VehicleProperty::VHAL_HEARTBEAT),
+ .access = VehiclePropertyAccess::READ,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ });
hardware->setPropertyConfigs(testConfigs);
mHardwarePtr = hardware.get();
mVhal = ndk::SharedRefBase::make<DefaultVehicleHal>(std::move(hardware));
mVhalClient = IVehicle::fromBinder(mVhal->asBinder());
mCallback = ndk::SharedRefBase::make<MockVehicleCallback>();
- mCallbackClient = IVehicleCallback::fromBinder(mCallback->asBinder());
+ // Keep the local binder alive.
+ mBinder = mCallback->asBinder();
+ mCallbackClient = IVehicleCallback::fromBinder(mBinder);
+
+ // Set the linkToDeath to a fake implementation that always returns OK.
+ setTestLinkToDeathImpl();
}
void TearDown() override {
@@ -373,10 +348,36 @@
void setTimeout(int64_t timeoutInNano) { mVhal->setTimeout(timeoutInNano); }
+ void setTestLinkToDeathImpl() {
+ mVhal->setLinkToDeathImpl(std::make_unique<TestLinkToDeathImpl>());
+ }
+
size_t countPendingRequests() { return mVhal->mPendingRequestPool->countPendingRequests(); }
+ size_t countClients() {
+ std::scoped_lock<std::mutex> lockGuard(mVhal->mLock);
+ return mVhal->mGetValuesClients.size() + mVhal->mSetValuesClients.size() +
+ mVhal->mSubscriptionClients->countClients();
+ }
+
std::shared_ptr<PendingRequestPool> getPool() { return mVhal->mPendingRequestPool; }
+ void onBinderDied(void* cookie) { return mVhal->onBinderDied(cookie); }
+
+ void onBinderUnlinked(void* cookie) { return mVhal->onBinderUnlinked(cookie); }
+
+ void* getOnBinderDiedContexts(AIBinder* clientId) {
+ std::scoped_lock<std::mutex> lockGuard(mVhal->mLock);
+ return mVhal->mOnBinderDiedContexts[clientId].get();
+ }
+
+ size_t countOnBinderDiedContexts() {
+ std::scoped_lock<std::mutex> lockGuard(mVhal->mLock);
+ return mVhal->mOnBinderDiedContexts.size();
+ }
+
+ bool hasNoSubscriptions() { return mVhal->mSubscriptionManager->isEmpty(); }
+
static Result<void> getValuesTestCases(size_t size, GetValueRequests& requests,
std::vector<GetValueResult>& expectedResults,
std::vector<GetValueRequest>& expectedHardwareRequests) {
@@ -447,17 +448,20 @@
return {};
}
- size_t countClients() {
- std::scoped_lock<std::mutex> lockGuard(mVhal->mLock);
- return mVhal->mGetValuesClients.size() + mVhal->mSetValuesClients.size();
- }
-
private:
std::shared_ptr<DefaultVehicleHal> mVhal;
std::shared_ptr<IVehicle> mVhalClient;
MockVehicleHardware* mHardwarePtr;
std::shared_ptr<MockVehicleCallback> mCallback;
std::shared_ptr<IVehicleCallback> mCallbackClient;
+ SpAIBinder mBinder;
+
+ class TestLinkToDeathImpl final : public DefaultVehicleHal::ILinkToDeath {
+ public:
+ binder_status_t linkToDeath(AIBinder*, AIBinder_DeathRecipient*, void*) override {
+ return STATUS_OK;
+ }
+ };
};
TEST_F(DefaultVehicleHalTest, testGetAllPropConfigsSmall) {
@@ -583,6 +587,39 @@
ASSERT_EQ(status.getServiceSpecificError(), toInt(StatusCode::INVALID_ARG));
}
+TEST_F(DefaultVehicleHalTest, testGetValuesNoReadPermission) {
+ GetValueRequests requests = {
+ .sharedMemoryFd = {},
+ .payloads =
+ {
+ {
+ .requestId = 0,
+ .prop =
+ {
+ .prop = WRITE_ONLY_PROP,
+ },
+ },
+ },
+ };
+
+ auto status = getClient()->getValues(getCallbackClient(), requests);
+
+ ASSERT_TRUE(status.isOk()) << "getValue with no read permission should return OK with the "
+ "error delivered through the callback"
+ << ", error: " << status.getMessage();
+ EXPECT_TRUE(getHardware()->nextGetValueRequests().empty()) << "expect no request to hardware";
+
+ auto maybeResult = getCallback()->nextGetValueResults();
+ ASSERT_TRUE(maybeResult.has_value()) << "no results in callback";
+ EXPECT_EQ(maybeResult.value().payloads, std::vector<GetValueResult>({
+ {
+ .requestId = 0,
+ .status = StatusCode::ACCESS_DENIED,
+ },
+ }))
+ << "expect to get ACCESS_DENIED status if no read permission";
+}
+
TEST_F(DefaultVehicleHalTest, testGetValuesFinishBeforeTimeout) {
// timeout: 0.1s
int64_t timeout = 100000000;
@@ -969,6 +1006,544 @@
ASSERT_FALSE(status.isOk()) << "duplicate request properties in one request must fail";
}
+TEST_F(DefaultVehicleHalTest, testSubscribeUnsubscribe) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_ON_CHANGE_PROP,
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ status = getClient()->unsubscribe(getCallbackClient(),
+ std::vector<int32_t>({GLOBAL_ON_CHANGE_PROP}));
+
+ ASSERT_TRUE(status.isOk()) << "unsubscribe failed: " << status.getMessage();
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeGlobalOnChangeNormal) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_ON_CHANGE_PROP,
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ VehiclePropValue testValue{
+ .prop = GLOBAL_ON_CHANGE_PROP,
+ .value.int32Values = {0},
+ };
+ SetValueRequests setValueRequests = {
+ .payloads =
+ {
+ SetValueRequest{
+ .requestId = 0,
+ .value = testValue,
+ },
+ },
+ };
+ std::vector<SetValueResult> setValueResults = {{
+ .requestId = 0,
+ .status = StatusCode::OK,
+ }};
+
+ // Set the value to trigger a property change event.
+ getHardware()->addSetValueResponses(setValueResults);
+ status = getClient()->setValues(getCallbackClient(), setValueRequests);
+
+ ASSERT_TRUE(status.isOk()) << "setValues failed: " << status.getMessage();
+
+ auto maybeResults = getCallback()->nextOnPropertyEventResults();
+ ASSERT_TRUE(maybeResults.has_value()) << "no results in callback";
+ ASSERT_THAT(maybeResults.value().payloads, UnorderedElementsAre(testValue))
+ << "results mismatch, expect on change event for the updated value";
+ ASSERT_FALSE(getCallback()->nextOnPropertyEventResults().has_value())
+ << "more results than expected";
+ EXPECT_EQ(countClients(), static_cast<size_t>(1));
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeGlobalOnchangeUnrelatedEventIgnored) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_ON_CHANGE_PROP,
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ VehiclePropValue testValue{
+ .prop = GLOBAL_CONTINUOUS_PROP,
+ .value.int32Values = {0},
+ };
+
+ // Set the value to trigger a property change event. This event should be ignored because we
+ // have not subscribed to it.
+ getHardware()->addSetValueResponses({{
+ .requestId = 0,
+ .status = StatusCode::OK,
+ }});
+ status = getClient()->setValues(getCallbackClient(),
+ {
+ .payloads =
+ {
+ SetValueRequest{
+ .requestId = 0,
+ .value = testValue,
+ },
+ },
+ });
+
+ ASSERT_TRUE(status.isOk()) << "setValues failed: " << status.getMessage();
+
+ ASSERT_FALSE(getCallback()->nextOnPropertyEventResults().has_value())
+ << "must receive no property update event if the property is not subscribed";
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeAreaOnChange) {
+ int testAreaId = toInt(VehicleAreaWindow::ROW_1_LEFT);
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = AREA_ON_CHANGE_PROP,
+ .areaIds = {testAreaId},
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ VehiclePropValue testValue{
+ .prop = AREA_ON_CHANGE_PROP,
+ .areaId = testAreaId,
+ .value.int32Values = {0},
+ };
+
+ // Set the value to trigger a property change event.
+ getHardware()->addSetValueResponses({{
+ .requestId = 0,
+ .status = StatusCode::OK,
+ }});
+ status = getClient()->setValues(getCallbackClient(),
+ {
+ .payloads =
+ {
+ SetValueRequest{
+ .requestId = 0,
+ .value = testValue,
+ },
+ },
+ });
+
+ ASSERT_TRUE(status.isOk()) << "setValues failed: " << status.getMessage();
+
+ auto maybeResults = getCallback()->nextOnPropertyEventResults();
+ ASSERT_TRUE(maybeResults.has_value()) << "no results in callback";
+ ASSERT_THAT(maybeResults.value().payloads, UnorderedElementsAre(testValue))
+ << "results mismatch, expect on change event for the updated value";
+ ASSERT_FALSE(getCallback()->nextOnPropertyEventResults().has_value())
+ << "more results than expected";
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeAreaOnChangeAllAreas) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = AREA_ON_CHANGE_PROP,
+ // No areaIds means subscribing to all area IDs.
+ .areaIds = {},
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ VehiclePropValue testValue1{
+ .prop = AREA_ON_CHANGE_PROP,
+ .areaId = toInt(VehicleAreaWindow::ROW_1_LEFT),
+ .value.int32Values = {0},
+ };
+ VehiclePropValue testValue2{
+ .prop = AREA_ON_CHANGE_PROP,
+ .areaId = toInt(VehicleAreaWindow::ROW_1_RIGHT),
+ .value.int32Values = {0},
+ };
+
+ // Set the values to trigger property change events for two areas.
+ getHardware()->addSetValueResponses({{
+ .requestId = 0,
+ .status = StatusCode::OK,
+ },
+ {
+ .requestId = 1,
+ .status = StatusCode::OK,
+ }});
+ status = getClient()->setValues(getCallbackClient(),
+ {
+ .payloads =
+ {
+ SetValueRequest{
+ .requestId = 0,
+ .value = testValue1,
+ },
+ SetValueRequest{
+ .requestId = 1,
+ .value = testValue2,
+ },
+ },
+ });
+
+ ASSERT_TRUE(status.isOk()) << "setValues failed: " << status.getMessage();
+
+ auto maybeResults = getCallback()->nextOnPropertyEventResults();
+ ASSERT_TRUE(maybeResults.has_value()) << "no results in callback";
+ ASSERT_THAT(maybeResults.value().payloads, UnorderedElementsAre(testValue1, testValue2))
+ << "results mismatch, expect two on-change events for all updated areas";
+ ASSERT_FALSE(getCallback()->nextOnPropertyEventResults().has_value())
+ << "more results than expected";
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeGlobalContinuous) {
+ VehiclePropValue testValue{
+ .prop = GLOBAL_CONTINUOUS_PROP,
+ .value.int32Values = {0},
+ };
+ // Set responses for all the hardware getValues requests.
+ getHardware()->setGetValueResponder(
+ [](std::shared_ptr<const IVehicleHardware::GetValuesCallback> callback,
+ const std::vector<GetValueRequest>& requests) {
+ std::vector<GetValueResult> results;
+ for (auto& request : requests) {
+ VehiclePropValue prop = request.prop;
+ prop.value.int32Values = {0};
+ results.push_back({
+ .requestId = request.requestId,
+ .status = StatusCode::OK,
+ .prop = prop,
+ });
+ }
+ (*callback)(results);
+ return StatusCode::OK;
+ });
+
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_CONTINUOUS_PROP,
+ .sampleRate = 20.0,
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ // Sleep for 1s, which should generate ~20 events.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // Should trigger about 20 times, check for at least 15 events to be safe.
+ for (size_t i = 0; i < 15; i++) {
+ auto maybeResults = getCallback()->nextOnPropertyEventResults();
+ ASSERT_TRUE(maybeResults.has_value()) << "no results in callback";
+ ASSERT_THAT(maybeResults.value().payloads, UnorderedElementsAre(testValue))
+ << "results mismatch, expect to get the updated value";
+ }
+ EXPECT_EQ(countClients(), static_cast<size_t>(1));
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeAreaContinuous) {
+ // Set responses for all the hardware getValues requests.
+ getHardware()->setGetValueResponder(
+ [](std::shared_ptr<const IVehicleHardware::GetValuesCallback> callback,
+ const std::vector<GetValueRequest>& requests) {
+ std::vector<GetValueResult> results;
+ for (auto& request : requests) {
+ VehiclePropValue prop = request.prop;
+ prop.value.int32Values = {0};
+ results.push_back({
+ .requestId = request.requestId,
+ .status = StatusCode::OK,
+ .prop = prop,
+ });
+ }
+ (*callback)(results);
+ return StatusCode::OK;
+ });
+
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = AREA_CONTINUOUS_PROP,
+ .sampleRate = 20.0,
+ .areaIds = {toInt(VehicleAreaWindow::ROW_1_LEFT)},
+ },
+ {
+ .propId = AREA_CONTINUOUS_PROP,
+ .sampleRate = 10.0,
+ .areaIds = {toInt(VehicleAreaWindow::ROW_1_RIGHT)},
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ // Sleep for 1s, which should generate ~20 events for the left area and ~10 for the right.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ std::vector<VehiclePropValue> events;
+ while (true) {
+ auto maybeResults = getCallback()->nextOnPropertyEventResults();
+ if (!maybeResults.has_value()) {
+ break;
+ }
+ for (const auto& value : maybeResults.value().payloads) {
+ events.push_back(value);
+ }
+ }
+
+ size_t leftCount = 0;
+ size_t rightCount = 0;
+
+ for (const auto& event : events) {
+ ASSERT_EQ(event.prop, AREA_CONTINUOUS_PROP);
+ if (event.areaId == toInt(VehicleAreaWindow::ROW_1_LEFT)) {
+ leftCount++;
+ continue;
+ }
+ rightCount++;
+ }
+
+ // Should trigger about 20 times, check for at least 15 events to be safe.
+ ASSERT_GE(leftCount, static_cast<size_t>(15));
+ // Should trigger about 10 times, check for at least 5 events to be safe.
+ ASSERT_GE(rightCount, static_cast<size_t>(5));
+}
+
+TEST_F(DefaultVehicleHalTest, testUnsubscribeOnChange) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_ON_CHANGE_PROP,
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ status = getClient()->unsubscribe(getCallbackClient(),
+ std::vector<int32_t>({GLOBAL_ON_CHANGE_PROP}));
+
+ ASSERT_TRUE(status.isOk()) << "unsubscribe failed: " << status.getMessage();
+
+ VehiclePropValue testValue{
+ .prop = GLOBAL_ON_CHANGE_PROP,
+ .value.int32Values = {0},
+ };
+
+ // Set the value to trigger a property change event.
+ getHardware()->addSetValueResponses({{
+ .requestId = 0,
+ .status = StatusCode::OK,
+ }});
+ status = getClient()->setValues(getCallbackClient(),
+ {
+ .payloads =
+ {
+ SetValueRequest{
+ .requestId = 0,
+ .value = testValue,
+ },
+ },
+ });
+
+ ASSERT_TRUE(status.isOk()) << "setValues failed: " << status.getMessage();
+
+ ASSERT_FALSE(getCallback()->nextOnPropertyEventResults().has_value())
+ << "No property event should be generated after unsubscription";
+}
+
+TEST_F(DefaultVehicleHalTest, testUnsubscribeContinuous) {
+ VehiclePropValue testValue{
+ .prop = GLOBAL_CONTINUOUS_PROP,
+ .value.int32Values = {0},
+ };
+ // Set responses for all the hardware getValues requests.
+ getHardware()->setGetValueResponder(
+ [](std::shared_ptr<const IVehicleHardware::GetValuesCallback> callback,
+ const std::vector<GetValueRequest>& requests) {
+ std::vector<GetValueResult> results;
+ for (auto& request : requests) {
+ VehiclePropValue prop = request.prop;
+ prop.value.int32Values = {0};
+ results.push_back({
+ .requestId = request.requestId,
+ .status = StatusCode::OK,
+ .prop = prop,
+ });
+ }
+ (*callback)(results);
+ return StatusCode::OK;
+ });
+
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_CONTINUOUS_PROP,
+ .sampleRate = 20.0,
+ },
+ };
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+
+ status = getClient()->unsubscribe(getCallbackClient(),
+ std::vector<int32_t>({GLOBAL_CONTINUOUS_PROP}));
+
+ ASSERT_TRUE(status.isOk()) << "unsubscribe failed: " << status.getMessage();
+
+ // Clear existing events.
+ while (getCallback()->nextOnPropertyEventResults().has_value()) {
+ // Do nothing.
+ }
+
+ // Wait for a while, make sure no new events are generated.
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+ ASSERT_FALSE(getCallback()->nextOnPropertyEventResults().has_value())
+ << "No property event should be generated after unsubscription";
+}
+
+class SubscribeInvalidOptionsTest
+ : public DefaultVehicleHalTest,
+ public testing::WithParamInterface<SubscribeInvalidOptionsTestCase> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ SubscribeInvalidOptionsTests, SubscribeInvalidOptionsTest,
+ ::testing::ValuesIn(getSubscribeInvalidOptionsTestCases()),
+ [](const testing::TestParamInfo<SubscribeInvalidOptionsTest::ParamType>& info) {
+ return info.param.name;
+ });
+
+TEST_P(SubscribeInvalidOptionsTest, testSubscribeInvalidOptions) {
+ std::vector<SubscribeOptions> options = {GetParam().option};
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_FALSE(status.isOk()) << "invalid subscribe options must fail";
+ ASSERT_EQ(status.getServiceSpecificError(), toInt(StatusCode::INVALID_ARG));
+}
+
+TEST_F(DefaultVehicleHalTest, testSubscribeNoReadPermission) {
+ std::vector<SubscribeOptions> options = {{
+ .propId = WRITE_ONLY_PROP,
+ }};
+
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_FALSE(status.isOk()) << "subscribe to a write-only property must fail";
+ ASSERT_EQ(status.getServiceSpecificError(), toInt(StatusCode::ACCESS_DENIED));
+}
+
+TEST_F(DefaultVehicleHalTest, testUnsubscribeFailure) {
+ auto status = getClient()->unsubscribe(getCallbackClient(),
+ std::vector<int32_t>({GLOBAL_ON_CHANGE_PROP}));
+
+ ASSERT_FALSE(status.isOk()) << "unsubscribing a property that was never subscribed must fail";
+ ASSERT_EQ(status.getServiceSpecificError(), toInt(StatusCode::INVALID_ARG));
+}
+
+TEST_F(DefaultVehicleHalTest, testHeartbeatEvent) {
+ std::vector<SubscribeOptions> options = {{
+ .propId = toInt(VehicleProperty::VHAL_HEARTBEAT),
+ }};
+ int64_t currentTime = uptimeMillis();
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+
+ ASSERT_TRUE(status.isOk()) << "unable to subscribe to heartbeat event: " << status.getMessage();
+
+ // We send out a heartbeat event every 3s, so sleep for 3s.
+ std::this_thread::sleep_for(std::chrono::seconds(3));
+
+ auto maybeResults = getCallback()->nextOnPropertyEventResults();
+ ASSERT_TRUE(maybeResults.has_value()) << "no results in callback";
+ ASSERT_EQ(maybeResults.value().payloads.size(), static_cast<size_t>(1));
+ VehiclePropValue gotValue = maybeResults.value().payloads[0];
+ ASSERT_EQ(gotValue.prop, toInt(VehicleProperty::VHAL_HEARTBEAT));
+ ASSERT_EQ(gotValue.value.int64Values.size(), static_cast<size_t>(1));
+ ASSERT_GE(gotValue.value.int64Values[0], currentTime)
+ << "expect to get the latest timestamp with the heartbeat event";
+}
+
+TEST_F(DefaultVehicleHalTest, testOnBinderDiedUnlinked) {
+ // First subscribe to a continuous property so that we register a death recipient for our
+ // client.
+ VehiclePropValue testValue{
+ .prop = GLOBAL_CONTINUOUS_PROP,
+ .value.int32Values = {0},
+ };
+ // Set responses for all the hardware getValues requests.
+ getHardware()->setGetValueResponder(
+ [](std::shared_ptr<const IVehicleHardware::GetValuesCallback> callback,
+ const std::vector<GetValueRequest>& requests) {
+ std::vector<GetValueResult> results;
+ for (auto& request : requests) {
+ VehiclePropValue prop = request.prop;
+ prop.value.int32Values = {0};
+ results.push_back({
+ .requestId = request.requestId,
+ .status = StatusCode::OK,
+ .prop = prop,
+ });
+ }
+ (*callback)(results);
+ return StatusCode::OK;
+ });
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = GLOBAL_CONTINUOUS_PROP,
+ .sampleRate = 20.0,
+ },
+ };
+ auto status = getClient()->subscribe(getCallbackClient(), options, 0);
+ ASSERT_TRUE(status.isOk()) << "subscribe failed: " << status.getMessage();
+ // Sleep for 100ms so that the subscription client gets created; by then we would have tried
+ // to get the value at least once.
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+ // Issue another getValue request on the same client.
+ GetValueRequests requests;
+ std::vector<GetValueResult> expectedResults;
+ std::vector<GetValueRequest> expectedHardwareRequests;
+ ASSERT_TRUE(getValuesTestCases(1, requests, expectedResults, expectedHardwareRequests).ok());
+ getHardware()->addGetValueResponses(expectedResults);
+ status = getClient()->getValues(getCallbackClient(), requests);
+ ASSERT_TRUE(status.isOk()) << "getValues failed: " << status.getMessage();
+
+ ASSERT_EQ(countOnBinderDiedContexts(), static_cast<size_t>(1))
+ << "expect one OnBinderDied context when one client is registered";
+
+ // Get the death recipient cookie for our callback that would be used in onBinderDied and
+ // onBinderUnlinked.
+ AIBinder* clientId = getCallbackClient()->asBinder().get();
+ void* context = getOnBinderDiedContexts(clientId);
+
+ onBinderDied(context);
+
+ ASSERT_EQ(countClients(), static_cast<size_t>(0))
+ << "expect all clients to be removed when binder died";
+ ASSERT_TRUE(hasNoSubscriptions()) << "expect no subscriptions when binder died";
+
+ onBinderUnlinked(context);
+
+ ASSERT_EQ(countOnBinderDiedContexts(), static_cast<size_t>(0))
+ << "expect OnBinderDied context to be deleted when binder is unlinked";
+}
+
} // namespace vehicle
} // namespace automotive
} // namespace hardware
diff --git a/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.cpp b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.cpp
index ca366cd..5e3e03c 100644
--- a/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.cpp
+++ b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.cpp
@@ -31,16 +31,6 @@
using ::ndk::ScopedFileDescriptor;
template <class T>
-std::optional<T> pop(std::list<T>& items) {
- if (items.size() > 0) {
- auto item = std::move(items.front());
- items.pop_front();
- return item;
- }
- return std::nullopt;
-}
-
-template <class T>
static ScopedAStatus storeResults(const T& results, std::list<T>* storedResults) {
T resultsCopy{
.payloads = results.payloads,
@@ -65,8 +55,12 @@
return storeResults(results, &mSetValueResults);
}
-ScopedAStatus MockVehicleCallback::onPropertyEvent(const VehiclePropValues&, int32_t) {
- return ScopedAStatus::ok();
+ScopedAStatus MockVehicleCallback::onPropertyEvent(const VehiclePropValues& results,
+ int32_t sharedMemoryFileCount) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
+ mSharedMemoryFileCount = sharedMemoryFileCount;
+ return storeResults(results, &mOnPropertyEventResults);
}
ScopedAStatus MockVehicleCallback::onPropertySetError(const VehiclePropErrors&) {
@@ -83,6 +77,11 @@
return pop(mSetValueResults);
}
+std::optional<VehiclePropValues> MockVehicleCallback::nextOnPropertyEventResults() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return pop(mOnPropertyEventResults);
+}
+
} // namespace vehicle
} // namespace automotive
} // namespace hardware
diff --git a/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.h b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.h
index 916575a..c83164f 100644
--- a/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.h
+++ b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleCallback.h
@@ -31,6 +31,16 @@
namespace automotive {
namespace vehicle {
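+// Pops and returns the front element of |items|, or std::nullopt if the list is empty.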
+template <class T>
+std::optional<T> pop(std::list<T>& items) {
+ if (items.size() > 0) {
+ auto item = std::move(items.front());
+ items.pop_front();
+ return item;
+ }
+ return std::nullopt;
+}
+
// MockVehicleCallback is a mock VehicleCallback implementation that simply stores the results.
class MockVehicleCallback final
: public ::aidl::android::hardware::automotive::vehicle::BnVehicleCallback {
@@ -52,6 +62,8 @@
nextGetValueResults();
std::optional<::aidl::android::hardware::automotive::vehicle::SetValueResults>
nextSetValueResults();
+ std::optional<::aidl::android::hardware::automotive::vehicle::VehiclePropValues>
+ nextOnPropertyEventResults();
private:
std::mutex mLock;
@@ -59,6 +71,9 @@
GUARDED_BY(mLock);
std::list<::aidl::android::hardware::automotive::vehicle::SetValueResults> mSetValueResults
GUARDED_BY(mLock);
+ std::list<::aidl::android::hardware::automotive::vehicle::VehiclePropValues>
+ mOnPropertyEventResults GUARDED_BY(mLock);
+ int32_t mSharedMemoryFileCount GUARDED_BY(mLock);
};
} // namespace vehicle
diff --git a/automotive/vehicle/aidl/impl/vhal/test/MockVehicleHardware.cpp b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleHardware.cpp
new file mode 100644
index 0000000..eec32dd
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleHardware.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MockVehicleHardware.h"
+#include "MockVehicleCallback.h"
+
+#include <utils/Log.h>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+using ::aidl::android::hardware::automotive::vehicle::GetValueRequest;
+using ::aidl::android::hardware::automotive::vehicle::GetValueResult;
+using ::aidl::android::hardware::automotive::vehicle::SetValueRequest;
+using ::aidl::android::hardware::automotive::vehicle::SetValueResult;
+using ::aidl::android::hardware::automotive::vehicle::StatusCode;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropConfig;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropValue;
+
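+// The destructor waits for all detached response threads spawned by handleRequestsLocked to
+// finish before the mock is destroyed, so they never touch freed members.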
+MockVehicleHardware::~MockVehicleHardware() {
+ std::unique_lock<std::mutex> lk(mLock);
+ mCv.wait(lk, [this] { return mThreadCount == 0; });
+}
+
+std::vector<VehiclePropConfig> MockVehicleHardware::getAllPropertyConfigs() const {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return mPropertyConfigs;
+}
+
+StatusCode MockVehicleHardware::setValues(std::shared_ptr<const SetValuesCallback> callback,
+ const std::vector<SetValueRequest>& requests) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ if (StatusCode status = handleRequestsLocked(__func__, callback, requests, &mSetValueRequests,
+ &mSetValueResponses);
+ status != StatusCode::OK) {
+ return status;
+ }
+ if (mPropertyChangeCallback == nullptr) {
+ return StatusCode::OK;
+ }
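+ // Simulate a property change event for every value that was just set.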
+ std::vector<VehiclePropValue> values;
+ for (auto& request : requests) {
+ values.push_back(request.value);
+ }
+ (*mPropertyChangeCallback)(values);
+ return StatusCode::OK;
+}
+
+StatusCode MockVehicleHardware::getValues(std::shared_ptr<const GetValuesCallback> callback,
+ const std::vector<GetValueRequest>& requests) const {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ if (mGetValueResponder != nullptr) {
+ return mGetValueResponder(callback, requests);
+ }
+ return handleRequestsLocked(__func__, callback, requests, &mGetValueRequests,
+ &mGetValueResponses);
+}
+
+DumpResult MockVehicleHardware::dump(const std::vector<std::string>&) {
+ // TODO(b/200737967): mock this.
+ return DumpResult{};
+}
+
+StatusCode MockVehicleHardware::checkHealth() {
+ // TODO(b/200737967): mock this.
+ return StatusCode::OK;
+}
+
+void MockVehicleHardware::registerOnPropertyChangeEvent(
+ std::unique_ptr<const PropertyChangeCallback> callback) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mPropertyChangeCallback = std::move(callback);
+}
+
+void MockVehicleHardware::registerOnPropertySetErrorEvent(
+ std::unique_ptr<const PropertySetErrorCallback>) {
+ // TODO(b/200737967): mock this.
+}
+
+void MockVehicleHardware::setPropertyConfigs(const std::vector<VehiclePropConfig>& configs) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mPropertyConfigs = configs;
+}
+
+void MockVehicleHardware::addGetValueResponses(const std::vector<GetValueResult>& responses) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mGetValueResponses.push_back(responses);
+}
+
+void MockVehicleHardware::addSetValueResponses(const std::vector<SetValueResult>& responses) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mSetValueResponses.push_back(responses);
+}
+
+void MockVehicleHardware::setGetValueResponder(
+ std::function<StatusCode(std::shared_ptr<const GetValuesCallback>,
+ const std::vector<GetValueRequest>&)>&& responder) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mGetValueResponder = responder;
+}
+
+std::vector<GetValueRequest> MockVehicleHardware::nextGetValueRequests() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ std::optional<std::vector<GetValueRequest>> request = pop(mGetValueRequests);
+ if (!request.has_value()) {
+ return std::vector<GetValueRequest>();
+ }
+ return std::move(request.value());
+}
+
+std::vector<SetValueRequest> MockVehicleHardware::nextSetValueRequests() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ std::optional<std::vector<SetValueRequest>> request = pop(mSetValueRequests);
+ if (!request.has_value()) {
+ return std::vector<SetValueRequest>();
+ }
+ return std::move(request.value());
+}
+
+void MockVehicleHardware::setStatus(const char* functionName, StatusCode status) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mStatusByFunctions[functionName] = status;
+}
+
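+// When set to a non-zero value, each subsequent getValues/setValues response is delivered
+// asynchronously from a detached thread after sleeping for |timeInNano| nanoseconds.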
+void MockVehicleHardware::setSleepTime(int64_t timeInNano) {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mSleepTime = timeInNano;
+}
+
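+// Delivers the oldest stored response list through |callback|, or returns INTERNAL_ERROR when
+// no stored responses are left.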
+template <class ResultType>
+StatusCode MockVehicleHardware::returnResponse(
+ std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
+ std::list<std::vector<ResultType>>* storedResponses) const {
+ if (storedResponses->size() > 0) {
+ (*callback)(std::move(storedResponses->front()));
+ storedResponses->pop_front();
+ return StatusCode::OK;
+ } else {
+ ALOGE("no more responses");
+ return StatusCode::INTERNAL_ERROR;
+ }
+}
+
+template StatusCode MockVehicleHardware::returnResponse<GetValueResult>(
+ std::shared_ptr<const std::function<void(std::vector<GetValueResult>)>> callback,
+ std::list<std::vector<GetValueResult>>* storedResponses) const;
+
+template StatusCode MockVehicleHardware::returnResponse<SetValueResult>(
+ std::shared_ptr<const std::function<void(std::vector<SetValueResult>)>> callback,
+ std::list<std::vector<SetValueResult>>* storedResponses) const;
+
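+// Records the incoming requests and replies with the next stored responses, either inline or,
+// when a sleep time is configured, from a delayed detached thread. If a non-OK status was
+// configured via setStatus() for this function, it is returned without delivering a response.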
+template <class RequestType, class ResultType>
+StatusCode MockVehicleHardware::handleRequestsLocked(
+ const char* functionName,
+ std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
+ const std::vector<RequestType>& requests,
+ std::list<std::vector<RequestType>>* storedRequests,
+ std::list<std::vector<ResultType>>* storedResponses) const {
+ storedRequests->push_back(requests);
+ if (auto it = mStatusByFunctions.find(functionName); it != mStatusByFunctions.end()) {
+ if (StatusCode status = it->second; status != StatusCode::OK) {
+ return status;
+ }
+ }
+
+ if (mSleepTime != 0) {
+ int64_t sleepTime = mSleepTime;
+ mThreadCount++;
+ std::thread t([this, callback, sleepTime, storedResponses]() {
+ std::this_thread::sleep_for(std::chrono::nanoseconds(sleepTime));
+ returnResponse(callback, storedResponses);
+ mThreadCount--;
+ mCv.notify_one();
+ });
+ // Detach the thread here so we do not have to maintain the thread object. mThreadCount
+ // and mCv make sure we wait for all threads to end before we exit.
+ t.detach();
+ return StatusCode::OK;
+
+ } else {
+ return returnResponse(callback, storedResponses);
+ }
+}
+
+template StatusCode MockVehicleHardware::handleRequestsLocked<GetValueRequest, GetValueResult>(
+ const char* functionName,
+ std::shared_ptr<const std::function<void(std::vector<GetValueResult>)>> callback,
+ const std::vector<GetValueRequest>& requests,
+ std::list<std::vector<GetValueRequest>>* storedRequests,
+ std::list<std::vector<GetValueResult>>* storedResponses) const;
+
+template StatusCode MockVehicleHardware::handleRequestsLocked<SetValueRequest, SetValueResult>(
+ const char* functionName,
+ std::shared_ptr<const std::function<void(std::vector<SetValueResult>)>> callback,
+ const std::vector<SetValueRequest>& requests,
+ std::list<std::vector<SetValueRequest>>* storedRequests,
+ std::list<std::vector<SetValueResult>>* storedResponses) const;
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/vehicle/aidl/impl/vhal/test/MockVehicleHardware.h b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleHardware.h
new file mode 100644
index 0000000..0844de1
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/test/MockVehicleHardware.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef android_hardware_automotive_vehicle_aidl_impl_vhal_test_MockVehicleHardware_H_
+#define android_hardware_automotive_vehicle_aidl_impl_vhal_test_MockVehicleHardware_H_
+
+#include <IVehicleHardware.h>
+#include <VehicleHalTypes.h>
+
+#include <android-base/thread_annotations.h>
+
+#include <atomic>
+#include <condition_variable>
+#include <list>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+class MockVehicleHardware final : public IVehicleHardware {
+ public:
+ ~MockVehicleHardware();
+
+ std::vector<::aidl::android::hardware::automotive::vehicle::VehiclePropConfig>
+ getAllPropertyConfigs() const override;
+ ::aidl::android::hardware::automotive::vehicle::StatusCode setValues(
+ std::shared_ptr<const SetValuesCallback> callback,
+ const std::vector<::aidl::android::hardware::automotive::vehicle::SetValueRequest>&
+ requests) override;
+ ::aidl::android::hardware::automotive::vehicle::StatusCode getValues(
+ std::shared_ptr<const GetValuesCallback> callback,
+ const std::vector<::aidl::android::hardware::automotive::vehicle::GetValueRequest>&
+ requests) const override;
+ DumpResult dump(const std::vector<std::string>&) override;
+ ::aidl::android::hardware::automotive::vehicle::StatusCode checkHealth() override;
+ void registerOnPropertyChangeEvent(
+ std::unique_ptr<const PropertyChangeCallback> callback) override;
+ void registerOnPropertySetErrorEvent(std::unique_ptr<const PropertySetErrorCallback>) override;
+
+ // Test functions.
+ void setPropertyConfigs(
+ const std::vector<::aidl::android::hardware::automotive::vehicle::VehiclePropConfig>&
+ configs);
+ void addGetValueResponses(
+ const std::vector<::aidl::android::hardware::automotive::vehicle::GetValueResult>&
+ responses);
+ void addSetValueResponses(
+ const std::vector<::aidl::android::hardware::automotive::vehicle::SetValueResult>&
+ responses);
+ void setGetValueResponder(
+ std::function<::aidl::android::hardware::automotive::vehicle::StatusCode(
+ std::shared_ptr<const GetValuesCallback>,
+ const std::vector<
+ ::aidl::android::hardware::automotive::vehicle::GetValueRequest>&)>&&
+ responder);
+ std::vector<::aidl::android::hardware::automotive::vehicle::GetValueRequest>
+ nextGetValueRequests();
+ std::vector<::aidl::android::hardware::automotive::vehicle::SetValueRequest>
+ nextSetValueRequests();
+ void setStatus(const char* functionName,
+ ::aidl::android::hardware::automotive::vehicle::StatusCode status);
+ void setSleepTime(int64_t timeInNano);
+
+ private:
+ mutable std::mutex mLock;
+ mutable std::condition_variable mCv;
+ mutable std::atomic<int> mThreadCount = 0;
+ std::vector<::aidl::android::hardware::automotive::vehicle::VehiclePropConfig> mPropertyConfigs
+ GUARDED_BY(mLock);
+ mutable std::list<std::vector<::aidl::android::hardware::automotive::vehicle::GetValueRequest>>
+ mGetValueRequests GUARDED_BY(mLock);
+ mutable std::list<std::vector<::aidl::android::hardware::automotive::vehicle::GetValueResult>>
+ mGetValueResponses GUARDED_BY(mLock);
+ mutable std::list<std::vector<::aidl::android::hardware::automotive::vehicle::SetValueRequest>>
+ mSetValueRequests GUARDED_BY(mLock);
+ mutable std::list<std::vector<::aidl::android::hardware::automotive::vehicle::SetValueResult>>
+ mSetValueResponses GUARDED_BY(mLock);
+ std::unordered_map<const char*, ::aidl::android::hardware::automotive::vehicle::StatusCode>
+ mStatusByFunctions GUARDED_BY(mLock);
+ int64_t mSleepTime GUARDED_BY(mLock) = 0;
+ std::unique_ptr<const PropertyChangeCallback> mPropertyChangeCallback GUARDED_BY(mLock);
+ std::function<::aidl::android::hardware::automotive::vehicle::StatusCode(
+ std::shared_ptr<const GetValuesCallback>,
+ const std::vector<::aidl::android::hardware::automotive::vehicle::GetValueRequest>&)>
+ mGetValueResponder GUARDED_BY(mLock);
+
+ template <class ResultType>
+ ::aidl::android::hardware::automotive::vehicle::StatusCode returnResponse(
+ std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
+ std::list<std::vector<ResultType>>* storedResponses) const;
+ template <class RequestType, class ResultType>
+ ::aidl::android::hardware::automotive::vehicle::StatusCode handleRequestsLocked(
+ const char* functionName,
+ std::shared_ptr<const std::function<void(std::vector<ResultType>)>> callback,
+ const std::vector<RequestType>& requests,
+ std::list<std::vector<RequestType>>* storedRequests,
+ std::list<std::vector<ResultType>>* storedResponses) const REQUIRES(mLock);
+};
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
+
+#endif // android_hardware_automotive_vehicle_aidl_impl_vhal_test_MockVehicleHardware_H_
diff --git a/automotive/vehicle/aidl/impl/vhal/test/RecurrentTimerTest.cpp b/automotive/vehicle/aidl/impl/vhal/test/RecurrentTimerTest.cpp
new file mode 100644
index 0000000..d343cea
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/test/RecurrentTimerTest.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RecurrentTimer.h"
+
+#include <android-base/thread_annotations.h>
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <memory>
+#include <mutex>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+class RecurrentTimerTest : public ::testing::Test {
+ public:
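+ // Returns a callback that records |token| into the called-callback list every time it fires.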
+ std::shared_ptr<RecurrentTimer::Callback> getCallback(size_t token) {
+ return std::make_shared<RecurrentTimer::Callback>([this, token] {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+
+ mCallbacks.push_back(token);
+ });
+ }
+
+ std::vector<size_t> getCalledCallbacks() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return mCallbacks;
+ }
+
+ void clearCalledCallbacks() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mCallbacks.clear();
+ }
+
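+ // Returns the number of entries currently pending in the timer's internal callback queue.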
+ size_t countTimerCallbackQueue(RecurrentTimer* timer) {
+ std::scoped_lock<std::mutex> lockGuard(timer->mLock);
+ return timer->mCallbackQueue.size();
+ }
+
+ private:
+ std::mutex mLock;
+ std::vector<size_t> mCallbacks GUARDED_BY(mLock);
+};
+
+TEST_F(RecurrentTimerTest, testRegisterCallback) {
+ RecurrentTimer timer;
+ // 0.1s
+ int64_t interval = 100000000;
+
+ auto action = getCallback(0);
+ timer.registerTimerCallback(interval, action);
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ timer.unregisterTimerCallback(action);
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ ASSERT_GE(getCalledCallbacks().size(), static_cast<size_t>(9));
+}
+
+TEST_F(RecurrentTimerTest, testRegisterUnregisterRegister) {
+ RecurrentTimer timer;
+ // 0.1s
+ int64_t interval = 100000000;
+
+ auto action = getCallback(0);
+ timer.registerTimerCallback(interval, action);
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+
+ timer.unregisterTimerCallback(action);
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+
+ clearCalledCallbacks();
+
+ timer.registerTimerCallback(interval, action);
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ ASSERT_GE(getCalledCallbacks().size(), static_cast<size_t>(9));
+}
+
+TEST_F(RecurrentTimerTest, testDestroyTimerWithCallback) {
+ std::unique_ptr<RecurrentTimer> timer = std::make_unique<RecurrentTimer>();
+ // 0.1s
+ int64_t interval = 100000000;
+
+ auto action = getCallback(0);
+ timer->registerTimerCallback(interval, action);
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+
+ timer.reset();
+
+ clearCalledCallbacks();
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+
+ ASSERT_TRUE(getCalledCallbacks().empty());
+}
+
+TEST_F(RecurrentTimerTest, testRegisterMultipleCallbacks) {
+ RecurrentTimer timer;
+ // 0.1s
+ int64_t interval1 = 100000000;
+ auto action1 = getCallback(1);
+ timer.registerTimerCallback(interval1, action1);
+ // 0.05s
+ int64_t interval2 = 50000000;
+ auto action2 = getCallback(2);
+ timer.registerTimerCallback(interval2, action2);
+ // 0.03s
+ int64_t interval3 = 30000000;
+ auto action3 = getCallback(3);
+ timer.registerTimerCallback(interval3, action3);
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ timer.unregisterTimerCallback(action1);
+ timer.unregisterTimerCallback(action2);
+ timer.unregisterTimerCallback(action3);
+
+ size_t action1Count = 0;
+ size_t action2Count = 0;
+ size_t action3Count = 0;
+ for (size_t token : getCalledCallbacks()) {
+ if (token == 1) {
+ action1Count++;
+ }
+ if (token == 2) {
+ action2Count++;
+ }
+ if (token == 3) {
+ action3Count++;
+ }
+ }
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ ASSERT_GE(action1Count, static_cast<size_t>(9));
+ // Theoretically trigger 20 times, but check for at least 15 times to be stable.
+ ASSERT_GE(action2Count, static_cast<size_t>(15));
+ // Theoretically trigger 33 times, but check for at least 25 times to be stable.
+ ASSERT_GE(action3Count, static_cast<size_t>(25));
+}
+
+TEST_F(RecurrentTimerTest, testRegisterSameCallbackMultipleTimes) {
+ RecurrentTimer timer;
+ // 0.02s
+ int64_t interval1 = 20000000;
+ // 0.01s
+ int64_t interval2 = 10000000;
+
+ auto action = getCallback(0);
+ for (int i = 0; i < 10; i++) {
+ timer.registerTimerCallback(interval1, action);
+ timer.registerTimerCallback(interval2, action);
+ }
+
+ clearCalledCallbacks();
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ ASSERT_GE(getCalledCallbacks().size(), static_cast<size_t>(9));
+
+ timer.unregisterTimerCallback(action);
+
+ // Make sure there is no item in the callback queue.
+ ASSERT_EQ(countTimerCallbackQueue(&timer), static_cast<size_t>(0));
+}
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/automotive/vehicle/aidl/impl/vhal/test/SubscriptionManagerTest.cpp b/automotive/vehicle/aidl/impl/vhal/test/SubscriptionManagerTest.cpp
new file mode 100644
index 0000000..f81b1a2
--- /dev/null
+++ b/automotive/vehicle/aidl/impl/vhal/test/SubscriptionManagerTest.cpp
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SubscriptionManager.h"
+
+#include <VehicleHalTypes.h>
+
+#include <aidl/android/hardware/automotive/vehicle/BnVehicleCallback.h>
+#include <android-base/thread_annotations.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <float.h>
+#include <chrono>
+#include <list>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+namespace android {
+namespace hardware {
+namespace automotive {
+namespace vehicle {
+
+using ::aidl::android::hardware::automotive::vehicle::BnVehicleCallback;
+using ::aidl::android::hardware::automotive::vehicle::GetValueResults;
+using ::aidl::android::hardware::automotive::vehicle::IVehicleCallback;
+using ::aidl::android::hardware::automotive::vehicle::SetValueResults;
+using ::aidl::android::hardware::automotive::vehicle::SubscribeOptions;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropErrors;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropValue;
+using ::aidl::android::hardware::automotive::vehicle::VehiclePropValues;
+using ::ndk::ScopedAStatus;
+using ::ndk::SpAIBinder;
+using ::testing::ElementsAre;
+using ::testing::WhenSorted;
+
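+// PropertyCallback is a vehicle callback implementation that stores every property event it
+// receives so that tests can inspect them later.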
+class PropertyCallback final : public BnVehicleCallback {
+ public:
+ ScopedAStatus onGetValues(const GetValueResults&) override { return ScopedAStatus::ok(); }
+
+ ScopedAStatus onSetValues(const SetValueResults&) override { return ScopedAStatus::ok(); }
+
+ ScopedAStatus onPropertyEvent(const VehiclePropValues& values, int32_t) override {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ for (const auto& value : values.payloads) {
+ mEvents.push_back(value);
+ }
+ return ScopedAStatus::ok();
+ }
+
+ ScopedAStatus onPropertySetError(const VehiclePropErrors&) override {
+ return ScopedAStatus::ok();
+ }
+
+ // Test functions.
+ std::list<VehiclePropValue> getEvents() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ return mEvents;
+ }
+
+ void clearEvents() {
+ std::scoped_lock<std::mutex> lockGuard(mLock);
+ mEvents.clear();
+ }
+
+ private:
+ std::mutex mLock;
+ std::list<VehiclePropValue> mEvents GUARDED_BY(mLock);
+};
+
+class SubscriptionManagerTest : public ::testing::Test {
+ public:
+ void SetUp() override {
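+ // The manager under test forwards each updated property value to the subscribed
+ // client's onPropertyEvent callback.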
+ mManager = std::make_unique<SubscriptionManager>(
+ [](const std::shared_ptr<IVehicleCallback>& callback,
+ const VehiclePropValue& value) {
+ callback->onPropertyEvent(
+ VehiclePropValues{
+ .payloads = {value},
+ },
+ 0);
+ });
+ mCallback = ::ndk::SharedRefBase::make<PropertyCallback>();
+ // Keep the local binder alive.
+ mBinder = mCallback->asBinder();
+ mCallbackClient = IVehicleCallback::fromBinder(mBinder);
+ }
+
+ SubscriptionManager* getManager() { return mManager.get(); }
+
+ std::shared_ptr<IVehicleCallback> getCallbackClient() { return mCallbackClient; }
+
+ PropertyCallback* getCallback() { return mCallback.get(); }
+
+ std::list<VehiclePropValue> getEvents() { return getCallback()->getEvents(); }
+
+ void clearEvents() { return getCallback()->clearEvents(); }
+
+ private:
+ std::unique_ptr<SubscriptionManager> mManager;
+ std::shared_ptr<PropertyCallback> mCallback;
+ std::shared_ptr<IVehicleCallback> mCallbackClient;
+ SpAIBinder mBinder;
+};
+
+TEST_F(SubscriptionManagerTest, testSubscribeGlobalContinuous) {
+ std::vector<SubscribeOptions> options = {{
+ .propId = 0,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ }};
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ ASSERT_GE(getEvents().size(), static_cast<size_t>(9));
+ EXPECT_EQ(getEvents().back().prop, 0);
+ EXPECT_EQ(getEvents().back().areaId, 0);
+}
+
+TEST_F(SubscriptionManagerTest, testSubscribeMultiplePropsGlobalContinuous) {
+ std::vector<SubscribeOptions> options = {{
+ .propId = 0,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ .sampleRate = 20.0,
+ }};
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ size_t event0Count = 0;
+ size_t event1Count = 0;
+
+ for (const auto& event : getEvents()) {
+ if (event.prop == 0) {
+ event0Count++;
+ } else {
+ event1Count++;
+ }
+ }
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ EXPECT_GE(event0Count, static_cast<size_t>(9));
+ // Theoretically trigger 20 times, but check for at least 15 times to be stable.
+ EXPECT_GE(event1Count, static_cast<size_t>(15));
+}
+
+TEST_F(SubscriptionManagerTest, testOverrideSubscriptionContinuous) {
+ std::vector<SubscribeOptions> options = {{
+ .propId = 0,
+ .areaIds = {0},
+ .sampleRate = 20.0,
+ }};
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ // Override sample rate to be 10.0.
+ options[0].sampleRate = 10.0;
+ result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ EXPECT_GE(getEvents().size(), static_cast<size_t>(9));
+ EXPECT_LE(getEvents().size(), static_cast<size_t>(15));
+}
+
+TEST_F(SubscriptionManagerTest, testSubscribeMultipleAreasContinuous) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1},
+ .sampleRate = 10.0,
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ size_t area0Count = 0;
+ size_t area1Count = 0;
+
+ for (const auto& event : getEvents()) {
+ if (event.areaId == 0) {
+ area0Count++;
+ } else {
+ area1Count++;
+ }
+ }
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ EXPECT_GE(area0Count, static_cast<size_t>(9));
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ EXPECT_GE(area1Count, static_cast<size_t>(9));
+}
+
+TEST_F(SubscriptionManagerTest, testUnsubscribeGlobalContinuous) {
+ std::vector<SubscribeOptions> options = {{
+ .propId = 0,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ }};
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ result = getManager()->unsubscribe(getCallbackClient()->asBinder().get());
+ ASSERT_TRUE(result.ok()) << "failed to unsubscribe: " << result.error().message();
+
+ clearEvents();
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+
+ // No new events should be generated after unsubscription.
+ ASSERT_TRUE(getEvents().empty());
+}
+
+TEST_F(SubscriptionManagerTest, testUnsubscribeMultipleAreas) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1, 2, 3, 4},
+ .sampleRate = 10.0,
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ result = getManager()->unsubscribe(getCallbackClient()->asBinder().get(),
+ std::vector<int32_t>({0}));
+ ASSERT_TRUE(result.ok()) << "failed to unsubscribe: " << result.error().message();
+
+ clearEvents();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // Theoretically trigger 10 times, but check for at least 9 times to be stable.
+ EXPECT_GE(getEvents().size(), static_cast<size_t>(9));
+
+ for (const auto& event : getEvents()) {
+ EXPECT_EQ(event.prop, 1);
+ }
+}
+
+TEST_F(SubscriptionManagerTest, testUnsubscribeByCallback) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1, 2, 3, 4},
+ .sampleRate = 10.0,
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ result = getManager()->unsubscribe(getCallbackClient()->asBinder().get());
+ ASSERT_TRUE(result.ok()) << "failed to unsubscribe: " << result.error().message();
+
+ clearEvents();
+
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ EXPECT_TRUE(getEvents().empty());
+}
+
+TEST_F(SubscriptionManagerTest, testUnsubscribeFailure) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1, 2, 3, 4},
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, false);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ // Property ID: 2 was not subscribed.
+ result = getManager()->unsubscribe(getCallbackClient()->asBinder().get(),
+ std::vector<int32_t>({0, 1, 2}));
+ ASSERT_FALSE(result.ok()) << "unsubscribe an unsubscribed property must fail";
+
+ // Since property 0 and property 1 were not unsubscribed by the failed call, we should be able
+ // to unsubscribe them again.
+ result = getManager()->unsubscribe(getCallbackClient()->asBinder().get(),
+ std::vector<int32_t>({0, 1}));
+ ASSERT_TRUE(result.ok()) << "a failed unsubscription must not unsubscribe any properties: "
+ << result.error().message();
+}
+
+TEST_F(SubscriptionManagerTest, testSubscribeOnchange) {
+ std::vector<SubscribeOptions> options1 = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1},
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ },
+ };
+ std::vector<SubscribeOptions> options2 = {
+ {
+ .propId = 0,
+ .areaIds = {0},
+ },
+ };
+
+ SpAIBinder binder1 = ::ndk::SharedRefBase::make<PropertyCallback>()->asBinder();
+ std::shared_ptr<IVehicleCallback> client1 = IVehicleCallback::fromBinder(binder1);
+ SpAIBinder binder2 = ::ndk::SharedRefBase::make<PropertyCallback>()->asBinder();
+ std::shared_ptr<IVehicleCallback> client2 = IVehicleCallback::fromBinder(binder2);
+ auto result = getManager()->subscribe(client1, options1, false);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+ result = getManager()->subscribe(client2, options2, false);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ std::vector<VehiclePropValue> updatedValues = {
+ {
+ .prop = 0,
+ .areaId = 0,
+ },
+ {
+ .prop = 0,
+ .areaId = 1,
+ },
+ {
+ .prop = 1,
+ .areaId = 0,
+ },
+ {
+ .prop = 1,
+ .areaId = 1,
+ },
+ };
+ auto clients = getManager()->getSubscribedClients(updatedValues);
+
+ ASSERT_THAT(clients[client1],
+ WhenSorted(ElementsAre(&updatedValues[0], &updatedValues[1], &updatedValues[2])));
+ ASSERT_THAT(clients[client2], ElementsAre(&updatedValues[0]));
+}
+
+TEST_F(SubscriptionManagerTest, testSubscribeInvalidOption) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1, 2, 3, 4},
+ // invalid sample rate.
+ .sampleRate = 0.0,
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_FALSE(result.ok()) << "subscribe with invalid sample rate must fail";
+ ASSERT_TRUE(getManager()
+ ->getSubscribedClients({{
+ .prop = 0,
+ .areaId = 0,
+ },
+ {
+ .prop = 1,
+ .areaId = 0,
+ }})
+ .empty())
+ << "no property should be subscribed if error is returned";
+}
+
+TEST_F(SubscriptionManagerTest, testSubscribeNoAreaIds) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {},
+ .sampleRate = 1.0,
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ .sampleRate = 10.0,
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, true);
+ ASSERT_FALSE(result.ok()) << "subscribe with no area IDs must fail";
+ ASSERT_TRUE(getManager()
+ ->getSubscribedClients({{
+ .prop = 1,
+ .areaId = 0,
+ }})
+ .empty())
+ << "no property should be subscribed if error is returned";
+}
+
+TEST_F(SubscriptionManagerTest, testUnsubscribeOnchange) {
+ std::vector<SubscribeOptions> options = {
+ {
+ .propId = 0,
+ .areaIds = {0, 1},
+ },
+ {
+ .propId = 1,
+ .areaIds = {0},
+ },
+ };
+
+ auto result = getManager()->subscribe(getCallbackClient(), options, false);
+ ASSERT_TRUE(result.ok()) << "failed to subscribe: " << result.error().message();
+
+ result = getManager()->unsubscribe(getCallbackClient()->asBinder().get(),
+ std::vector<int32_t>({0}));
+ ASSERT_TRUE(result.ok()) << "failed to unsubscribe: " << result.error().message();
+
+ std::vector<VehiclePropValue> updatedValues = {
+ {
+ .prop = 0,
+ .areaId = 0,
+ },
+ {
+ .prop = 1,
+ .areaId = 0,
+ },
+ };
+ auto clients = getManager()->getSubscribedClients(updatedValues);
+
+ ASSERT_THAT(clients[getCallbackClient()], ElementsAre(&updatedValues[1]));
+}
+
+TEST_F(SubscriptionManagerTest, testCheckSampleRateValid) {
+ ASSERT_TRUE(SubscriptionManager::checkSampleRate(1.0));
+}
+
+TEST_F(SubscriptionManagerTest, testCheckSampleRateInvalidTooSmall) {
+ ASSERT_FALSE(SubscriptionManager::checkSampleRate(FLT_MIN));
+}
+
+TEST_F(SubscriptionManagerTest, testCheckSampleRateInvalidZero) {
+ ASSERT_FALSE(SubscriptionManager::checkSampleRate(0));
+}
+
+} // namespace vehicle
+} // namespace automotive
+} // namespace hardware
+} // namespace android
diff --git a/bluetooth/audio/aidl/aidl_api/android.hardware.bluetooth.audio/current/android/hardware/bluetooth/audio/CodecType.aidl b/bluetooth/audio/aidl/aidl_api/android.hardware.bluetooth.audio/current/android/hardware/bluetooth/audio/CodecType.aidl
index 44b434b..3a5f951 100644
--- a/bluetooth/audio/aidl/aidl_api/android.hardware.bluetooth.audio/current/android/hardware/bluetooth/audio/CodecType.aidl
+++ b/bluetooth/audio/aidl/aidl_api/android.hardware.bluetooth.audio/current/android/hardware/bluetooth/audio/CodecType.aidl
@@ -41,4 +41,5 @@
APTX_HD = 4,
LDAC = 5,
LC3 = 6,
+ VENDOR = 7,
}
diff --git a/bluetooth/audio/aidl/android/hardware/bluetooth/audio/CodecType.aidl b/bluetooth/audio/aidl/android/hardware/bluetooth/audio/CodecType.aidl
index 68c60f5..9c33081 100644
--- a/bluetooth/audio/aidl/android/hardware/bluetooth/audio/CodecType.aidl
+++ b/bluetooth/audio/aidl/android/hardware/bluetooth/audio/CodecType.aidl
@@ -26,4 +26,5 @@
APTX_HD,
LDAC,
LC3,
+ VENDOR,
}
diff --git a/bluetooth/audio/utils/aidl_session/BluetoothAudioCodecs.cpp b/bluetooth/audio/utils/aidl_session/BluetoothAudioCodecs.cpp
index 92cd0f5..380732f 100644
--- a/bluetooth/audio/utils/aidl_session/BluetoothAudioCodecs.cpp
+++ b/bluetooth/audio/utils/aidl_session/BluetoothAudioCodecs.cpp
@@ -355,6 +355,7 @@
kDefaultOffloadLc3Capability);
break;
case CodecType::UNKNOWN:
+ case CodecType::VENDOR:
codec_capability = {};
break;
}
@@ -420,6 +421,7 @@
}
break;
case CodecType::UNKNOWN:
+ case CodecType::VENDOR:
break;
}
return false;
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index dd45b0d..8c44010 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -969,7 +969,6 @@
void getPrivacyTestPatternModes(
const camera_metadata_t* staticMetadata,
std::unordered_set<int32_t>* privacyTestPatternModes/*out*/);
- static bool isColorCamera(const camera_metadata_t *metadata);
static V3_2::DataspaceFlags getDataspace(PixelFormat format);
@@ -6880,142 +6879,6 @@
}
}
-// Test the multi-camera API requirement for Google Requirement Freeze S
-// Note that this requirement can only be partially tested. If a vendor
-// device doesn't expose a physical camera in any shape or form, there is no way
-// the test can catch it.
-TEST_P(CameraHidlTest, grfSMultiCameraTest) {
- const int socGrfApi = property_get_int32("ro.board.first_api_level", /*default*/ -1);
- if (socGrfApi < 31 /*S*/) {
- // Non-GRF devices, or version < 31 Skip
- ALOGI("%s: socGrfApi level is %d. Skipping", __FUNCTION__, socGrfApi);
- return;
- }
-
- // Test that if more than one rear-facing color camera is
- // supported, there must be at least one rear-facing logical camera.
- hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames(mProvider);
- // Back facing non-logical color cameras
- std::set<std::string> rearColorCameras;
- // Back facing logical cameras' physical camera Id sets
- std::set<std::set<std::string>> rearPhysicalIds;
- for (const auto& name : cameraDeviceNames) {
- std::string cameraId;
- int deviceVersion = getCameraDeviceVersionAndId(name, mProviderType, &cameraId);
- switch (deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_3_8:
- case CAMERA_DEVICE_API_VERSION_3_7:
- case CAMERA_DEVICE_API_VERSION_3_6:
- case CAMERA_DEVICE_API_VERSION_3_5:
- case CAMERA_DEVICE_API_VERSION_3_4:
- case CAMERA_DEVICE_API_VERSION_3_3:
- case CAMERA_DEVICE_API_VERSION_3_2: {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_x;
- ALOGI("getCameraCharacteristics: Testing camera device %s", name.c_str());
- Return<void> ret;
- ret = mProvider->getCameraDeviceInterface_V3_x(
- name, [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_x = device;
- });
- ASSERT_TRUE(ret.isOk());
-
- ret = device3_x->getCameraCharacteristics([&](auto status, const auto& chars) {
- ASSERT_EQ(Status::OK, status);
- const camera_metadata_t* metadata = (camera_metadata_t*)chars.data();
-
- // Skip if this is not a color camera.
- if (!CameraHidlTest::isColorCamera(metadata)) {
- return;
- }
-
- // Check camera facing. Skip if facing is not BACK.
- // If this is not a logical camera, only note down
- // the camera ID, and skip.
- camera_metadata_ro_entry entry;
- int retcode = find_camera_metadata_ro_entry(
- metadata, ANDROID_LENS_FACING, &entry);
- ASSERT_EQ(retcode, 0);
- ASSERT_GT(entry.count, 0);
- uint8_t facing = entry.data.u8[0];
- bool isLogicalCamera = (isLogicalMultiCamera(metadata) == Status::OK);
- if (facing != ANDROID_LENS_FACING_BACK) {
- // Not BACK facing. Skip.
- return;
- }
- if (!isLogicalCamera) {
- rearColorCameras.insert(cameraId);
- return;
- }
-
- // Check logical camera's physical camera IDs for color
- // cameras.
- std::unordered_set<std::string> physicalCameraIds;
- Status s = getPhysicalCameraIds(metadata, &physicalCameraIds);
- ASSERT_EQ(Status::OK, s);
- rearPhysicalIds.emplace(physicalCameraIds.begin(), physicalCameraIds.end());
- for (const auto& physicalId : physicalCameraIds) {
- // Skip if the physicalId is publicly available
- for (auto& deviceName : cameraDeviceNames) {
- std::string publicVersion, publicId;
- ASSERT_TRUE(::matchDeviceName(deviceName, mProviderType,
- &publicVersion, &publicId));
- if (physicalId == publicId) {
- // Skip because public Ids will be iterated in outer loop.
- return;
- }
- }
-
- auto castResult = device::V3_5::ICameraDevice::castFrom(device3_x);
- ASSERT_TRUE(castResult.isOk());
- ::android::sp<::android::hardware::camera::device::V3_5::ICameraDevice>
- device3_5 = castResult;
- ASSERT_NE(device3_5, nullptr);
-
- // Check camera characteristics for hidden camera id
- Return<void> ret = device3_5->getPhysicalCameraCharacteristics(
- physicalId, [&](auto status, const auto& chars) {
- ASSERT_EQ(Status::OK, status);
- const camera_metadata_t* physicalMetadata =
- (camera_metadata_t*)chars.data();
-
- if (CameraHidlTest::isColorCamera(physicalMetadata)) {
- rearColorCameras.insert(physicalId);
- }
- });
- ASSERT_TRUE(ret.isOk());
- }
- });
- ASSERT_TRUE(ret.isOk());
- } break;
- case CAMERA_DEVICE_API_VERSION_1_0: {
- // Not applicable
- } break;
- default: {
- ALOGE("%s: Unsupported device version %d", __func__, deviceVersion);
- ADD_FAILURE();
- } break;
- }
- }
-
- // If there are more than one rear-facing color camera, a logical
- // multi-camera must be defined consisting of all rear-facing color
- // cameras.
- if (rearColorCameras.size() > 1) {
- bool hasRearLogical = false;
- for (const auto& physicalIds : rearPhysicalIds) {
- if (std::includes(physicalIds.begin(), physicalIds.end(),
- rearColorCameras.begin(), rearColorCameras.end())) {
- hasRearLogical = true;
- break;
- }
- }
- ASSERT_TRUE(hasRearLogical);
- }
-}
-
// Retrieve all valid output stream resolutions from the camera
// static characteristics.
Status CameraHidlTest::getAvailableOutputStreams(const camera_metadata_t* staticMeta,
@@ -7523,23 +7386,6 @@
return ret;
}
-bool CameraHidlTest::isColorCamera(const camera_metadata_t *metadata) {
- camera_metadata_ro_entry entry;
- int retcode = find_camera_metadata_ro_entry(
- metadata, ANDROID_REQUEST_AVAILABLE_CAPABILITIES, &entry);
- if ((0 == retcode) && (entry.count > 0)) {
- bool isBackwardCompatible = (std::find(entry.data.u8, entry.data.u8 + entry.count,
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE) !=
- entry.data.u8 + entry.count);
- bool isMonochrome = (std::find(entry.data.u8, entry.data.u8 + entry.count,
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME) !=
- entry.data.u8 + entry.count);
- bool isColor = isBackwardCompatible && !isMonochrome;
- return isColor;
- }
- return false;
-}
-
// Retrieve the reprocess input-output format map from the static
// camera characteristics.
Status CameraHidlTest::getZSLInputOutputMap(camera_metadata_t *staticMeta,
diff --git a/camera/provider/2.7/default/Android.bp b/camera/provider/2.7/default/Android.bp
new file mode 100644
index 0000000..bd5da2d
--- /dev/null
+++ b/camera/provider/2.7/default/Android.bp
@@ -0,0 +1,111 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_library_shared {
+ name: "android.hardware.camera.provider@2.7-external",
+ proprietary: true,
+ srcs: ["ExternalCameraProviderImpl_2_7.cpp"],
+ shared_libs: [
+ "android.hardware.camera.common@1.0",
+ "android.hardware.camera.device@1.0",
+ "android.hardware.camera.device@3.2",
+ "android.hardware.camera.device@3.3",
+ "android.hardware.camera.device@3.4",
+ "android.hardware.camera.device@3.5",
+ "android.hardware.camera.device@3.6",
+ "android.hardware.camera.provider@2.4",
+ "android.hardware.camera.provider@2.5",
+ "android.hardware.camera.provider@2.6",
+ "android.hardware.camera.provider@2.7",
+ "android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@3.0",
+ "android.hardware.graphics.mapper@4.0",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "camera.device@3.3-impl",
+ "camera.device@3.4-external-impl",
+ "camera.device@3.4-impl",
+ "camera.device@3.5-external-impl",
+ "camera.device@3.5-impl",
+ "camera.device@3.6-external-impl",
+ "libcamera_metadata",
+ "libcutils",
+ "libhardware",
+ "libhidlbase",
+ "liblog",
+ "libtinyxml2",
+ "libutils",
+ ],
+ static_libs: [
+ "android.hardware.camera.common@1.0-helper",
+ ],
+ header_libs: [
+ "camera.device@3.4-external-impl_headers",
+ "camera.device@3.5-external-impl_headers",
+ "camera.device@3.6-external-impl_headers",
+ ],
+ export_include_dirs: ["."],
+}
+
+cc_defaults {
+ name: "camera_external_service_2_7_defaults",
+ defaults: ["hidl_defaults"],
+ proprietary: true,
+ relative_install_path: "hw",
+ srcs: ["external-service.cpp"],
+ compile_multilib: "32",
+ shared_libs: [
+ "android.hardware.camera.common@1.0",
+ "android.hardware.camera.device@1.0",
+ "android.hardware.camera.device@3.2",
+ "android.hardware.camera.device@3.3",
+ "android.hardware.camera.device@3.4",
+ "android.hardware.camera.device@3.5",
+ "android.hardware.camera.provider@2.4",
+ "android.hardware.camera.provider@2.4-external",
+ "android.hardware.camera.provider@2.5",
+ "android.hardware.camera.provider@2.5-external",
+ "android.hardware.camera.provider@2.6",
+ "android.hardware.camera.provider@2.7",
+ "android.hardware.camera.provider@2.7-external",
+ "android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@3.0",
+ "android.hardware.graphics.mapper@4.0",
+ "libbinder",
+ "libcamera_metadata",
+ "libhidlbase",
+ "liblog",
+ "libtinyxml2",
+ "libutils",
+ ],
+ static_libs: [
+ "android.hardware.camera.common@1.0-helper",
+ ],
+ header_libs: [
+ "camera.device@3.4-external-impl_headers",
+ "camera.device@3.4-impl_headers",
+ "camera.device@3.5-external-impl_headers",
+ "camera.device@3.5-impl_headers",
+ "camera.device@3.6-external-impl_headers",
+ ],
+}
+
+cc_binary {
+ name: "android.hardware.camera.provider@2.7-external-service",
+ defaults: ["camera_external_service_2_7_defaults"],
+ init_rc: ["android.hardware.camera.provider@2.7-external-service.rc"],
+}
+
+cc_binary {
+ name: "android.hardware.camera.provider@2.7-external-service-lazy",
+ overrides: ["android.hardware.camera.provider@2.7-external-service"],
+ defaults: ["camera_external_service_2_7_defaults"],
+ init_rc: ["android.hardware.camera.provider@2.7-external-service-lazy.rc"],
+ cflags: ["-DLAZY_SERVICE"],
+}
diff --git a/camera/provider/2.7/default/CameraProvider_2_7.h b/camera/provider/2.7/default/CameraProvider_2_7.h
new file mode 100644
index 0000000..c34905f
--- /dev/null
+++ b/camera/provider/2.7/default/CameraProvider_2_7.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA_PROVIDER_V2_7_CAMERAPROVIDER_H
+#define ANDROID_HARDWARE_CAMERA_PROVIDER_V2_7_CAMERAPROVIDER_H
+
+#include <android/hardware/camera/provider/2.6/ICameraProviderCallback.h>
+#include <android/hardware/camera/provider/2.7/ICameraProvider.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace provider {
+namespace V2_7 {
+namespace implementation {
+
+using ::android::sp;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::camera::common::V1_0::CameraDeviceStatus;
+using ::android::hardware::camera::common::V1_0::Status;
+using ::android::hardware::camera::common::V1_0::TorchModeStatus;
+using ::android::hardware::camera::common::V1_0::VendorTag;
+using ::android::hardware::camera::common::V1_0::VendorTagSection;
+using ::android::hardware::camera::provider::V2_4::ICameraProviderCallback;
+using ::android::hardware::camera::provider::V2_5::DeviceState;
+using ::android::hardware::camera::provider::V2_7::CameraIdAndStreamCombination;
+using ::android::hardware::camera::provider::V2_7::ICameraProvider;
+using ::android::hidl::base::V1_0::IBase;
+
+// Default recommended RPC thread count for camera provider implementations
+const int HWBINDER_THREAD_COUNT = 6;
+
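+// CameraProvider is a thin HIDL shim: every ICameraProvider method is forwarded
+// to the wrapped IMPL (e.g. ExternalCameraProviderImpl_2_7 in external-service.cpp).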
+template <typename IMPL>
+struct CameraProvider : public ICameraProvider {
+ CameraProvider() : impl() {}
+ ~CameraProvider() {}
+
+ // Caller must use this method to check if CameraProvider ctor failed
+ bool isInitFailed() { return impl.isInitFailed(); }
+
+ // Methods from ::android::hardware::camera::provider::V2_4::ICameraProvider follow.
+ Return<Status> setCallback(const sp<ICameraProviderCallback>& callback) override {
+ return impl.setCallback(callback);
+ }
+
+ Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+ return impl.getVendorTags(_hidl_cb);
+ }
+
+ Return<void> getCameraIdList(getCameraIdList_cb _hidl_cb) override {
+ return impl.getCameraIdList(_hidl_cb);
+ }
+
+ Return<void> isSetTorchModeSupported(isSetTorchModeSupported_cb _hidl_cb) override {
+ return impl.isSetTorchModeSupported(_hidl_cb);
+ }
+
+ Return<void> getCameraDeviceInterface_V1_x(const hidl_string& cameraDeviceName,
+ getCameraDeviceInterface_V1_x_cb _hidl_cb) override {
+ return impl.getCameraDeviceInterface_V1_x(cameraDeviceName, _hidl_cb);
+ }
+
+ Return<void> getCameraDeviceInterface_V3_x(const hidl_string& cameraDeviceName,
+ getCameraDeviceInterface_V3_x_cb _hidl_cb) override {
+ return impl.getCameraDeviceInterface_V3_x(cameraDeviceName, _hidl_cb);
+ }
+
+ // Methods from ::android::hardware::camera::provider::V2_5::ICameraProvider follow.
+ Return<void> notifyDeviceStateChange(hardware::hidl_bitfield<DeviceState> newState) override {
+ return impl.notifyDeviceStateChange(newState);
+ }
+
+ // Methods from ::android::hardware::camera::provider::V2_7::ICameraProvider follow.
+ Return<void> getConcurrentStreamingCameraIds(
+ getConcurrentStreamingCameraIds_cb _hidl_cb) override {
+ return impl.getConcurrentStreamingCameraIds(_hidl_cb);
+ }
+
+ Return<void> isConcurrentStreamCombinationSupported(
+ const hidl_vec<
+ ::android::hardware::camera::provider::V2_6::CameraIdAndStreamCombination>&
+ configs,
+ isConcurrentStreamCombinationSupported_cb _hidl_cb) override {
+ return impl.isConcurrentStreamCombinationSupported(configs, _hidl_cb);
+ }
+
+ Return<void> isConcurrentStreamCombinationSupported_2_7(
+ const hidl_vec<CameraIdAndStreamCombination>& configs,
+ isConcurrentStreamCombinationSupported_2_7_cb _hidl_cb) override {
+ return impl.isConcurrentStreamCombinationSupported_2_7(configs, _hidl_cb);
+ }
+
+ private:
+ IMPL impl;
+};
+
+} // namespace implementation
+} // namespace V2_7
+} // namespace provider
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_CAMERA_PROVIDER_V2_7_CAMERAPROVIDER_H
\ No newline at end of file
diff --git a/camera/provider/2.7/default/ExternalCameraProviderImpl_2_7.cpp b/camera/provider/2.7/default/ExternalCameraProviderImpl_2_7.cpp
new file mode 100644
index 0000000..c812d54
--- /dev/null
+++ b/camera/provider/2.7/default/ExternalCameraProviderImpl_2_7.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CamPrvdr@2.7-external"
+//#define LOG_NDEBUG 0
+#include <log/log.h>
+
+#include <cutils/properties.h>
+#include <errno.h>
+#include <linux/videodev2.h>
+#include <sys/inotify.h>
+#include <regex>
+#include "ExternalCameraDevice_3_4.h"
+#include "ExternalCameraDevice_3_5.h"
+#include "ExternalCameraDevice_3_6.h"
+#include "ExternalCameraProviderImpl_2_7.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace provider {
+namespace V2_7 {
+namespace implementation {
+
+namespace {
+// "device@<version>/external/<id>"
+const std::regex kDeviceNameRE("device@([0-9]+\\.[0-9]+)/external/(.+)");
+const int kMaxDevicePathLen = 256;
+const char* kDevicePath = "/dev/";
+constexpr char kPrefix[] = "video";
+constexpr int kPrefixLen = sizeof(kPrefix) - 1;
+constexpr int kDevicePrefixLen = sizeof(kDevicePath) + kPrefixLen + 1;
+
+bool matchDeviceName(int cameraIdOffset, const hidl_string& deviceName, std::string* deviceVersion,
+ std::string* cameraDevicePath) {
+ std::string deviceNameStd(deviceName.c_str());
+ std::smatch sm;
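+ // e.g. with cameraIdOffset == 100, "device@3.4/external/101" maps to "/dev/video1"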
+ if (std::regex_match(deviceNameStd, sm, kDeviceNameRE)) {
+ if (deviceVersion != nullptr) {
+ *deviceVersion = sm[1];
+ }
+ if (cameraDevicePath != nullptr) {
+ *cameraDevicePath = "/dev/video" + std::to_string(std::stoi(sm[2]) - cameraIdOffset);
+ }
+ return true;
+ }
+ return false;
+}
+
+} // anonymous namespace
+
+ExternalCameraProviderImpl_2_7::ExternalCameraProviderImpl_2_7()
+ : mCfg(ExternalCameraConfig::loadFromCfg()) {
+ mHotPlugThread = sp<HotplugThread>::make(this);
+ mHotPlugThread->run("ExtCamHotPlug", PRIORITY_BACKGROUND);
+
+ mPreferredHal3MinorVersion =
+ property_get_int32("ro.vendor.camera.external.hal3TrebleMinorVersion", 4);
+ ALOGV("Preferred HAL 3 minor version is %d", mPreferredHal3MinorVersion);
+ switch (mPreferredHal3MinorVersion) {
+ case 4:
+ case 5:
+ case 6:
+ // OK
+ break;
+ default:
+ ALOGW("Unknown minor camera device HAL version %d in property "
+ "'camera.external.hal3TrebleMinorVersion', defaulting to 4",
+ mPreferredHal3MinorVersion);
+ mPreferredHal3MinorVersion = 4;
+ }
+}
+
+ExternalCameraProviderImpl_2_7::~ExternalCameraProviderImpl_2_7() {
+ mHotPlugThread->requestExit();
+}
+
+Return<Status> ExternalCameraProviderImpl_2_7::setCallback(
+ const sp<ICameraProviderCallback>& callback) {
+ Mutex::Autolock _l(mLock);
+ mCallbacks = callback;
+ if (mCallbacks == nullptr) {
+ return Status::OK;
+ }
+ // Send a callback for all devices to initialize
+ {
+ for (const auto& pair : mCameraStatusMap) {
+ mCallbacks->cameraDeviceStatusChange(pair.first, pair.second);
+ }
+ }
+
+ return Status::OK;
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::getVendorTags(
+ ICameraProvider::getVendorTags_cb _hidl_cb) {
+ // No vendor tag support for USB camera
+ hidl_vec<VendorTagSection> zeroSections;
+ _hidl_cb(Status::OK, zeroSections);
+ return Void();
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::getCameraIdList(
+ ICameraProvider::getCameraIdList_cb _hidl_cb) {
+ // The external camera HAL always reports zero cameras in this list; external
+ // cameras are instead reported via cameraDeviceStatusChange callbacks
+ hidl_vec<hidl_string> hidlDeviceNameList;
+ _hidl_cb(Status::OK, hidlDeviceNameList);
+ return Void();
+}
+
+void ExternalCameraProviderImpl_2_7::updateAttachedCameras() {
+ ALOGV("%s start scaning for existing V4L2 devices", __FUNCTION__);
+ // Find existing /dev/video* devices
+ DIR* devdir = opendir(kDevicePath);
+ if (devdir == 0) {
+ ALOGE("%s: cannot open %s! Exiting threadloop", __FUNCTION__, kDevicePath);
+ return;
+ }
+
+ struct dirent* de;
+ while ((de = readdir(devdir)) != 0) {
+ // Find external v4l devices that already exist before we start watching, and add them
+ if (!strncmp(kPrefix, de->d_name, kPrefixLen)) {
+ // TODO: This might reject some valid devices. Ex: internal is 33 and a device named 3
+ // is added.
+ std::string deviceId(de->d_name + kPrefixLen);
+ if (mCfg.mInternalDevices.count(deviceId) == 0) {
+ ALOGV("Non-internal v4l device %s found", de->d_name);
+ char v4l2DevicePath[kMaxDevicePathLen];
+ snprintf(v4l2DevicePath, kMaxDevicePathLen, "%s%s", kDevicePath, de->d_name);
+ deviceAdded(v4l2DevicePath);
+ }
+ }
+ }
+ closedir(devdir);
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::isSetTorchModeSupported(
+ ICameraProvider::isSetTorchModeSupported_cb _hidl_cb) {
+ // setTorchMode API is supported, though right now no external camera device
+ // has a flash unit.
+ _hidl_cb(Status::OK, true);
+ return Void();
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::getCameraDeviceInterface_V1_x(
+ const hidl_string&, ICameraProvider::getCameraDeviceInterface_V1_x_cb _hidl_cb) {
+ // External Camera HAL does not support HAL1
+ _hidl_cb(Status::OPERATION_NOT_SUPPORTED, nullptr);
+ return Void();
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::getCameraDeviceInterface_V3_x(
+ const hidl_string& cameraDeviceName,
+ ICameraProvider::getCameraDeviceInterface_V3_x_cb _hidl_cb) {
+ std::string cameraDevicePath, deviceVersion;
+ bool match = matchDeviceName(mCfg.cameraIdOffset, cameraDeviceName, &deviceVersion,
+ &cameraDevicePath);
+ if (!match) {
+ _hidl_cb(Status::ILLEGAL_ARGUMENT, nullptr);
+ return Void();
+ }
+
+ if (mCameraStatusMap.count(cameraDeviceName) == 0 ||
+ mCameraStatusMap[cameraDeviceName] != CameraDeviceStatus::PRESENT) {
+ _hidl_cb(Status::ILLEGAL_ARGUMENT, nullptr);
+ return Void();
+ }
+
+ sp<device::V3_4::implementation::ExternalCameraDevice> deviceImpl;
+ switch (mPreferredHal3MinorVersion) {
+ case 4: {
+ ALOGV("Constructing v3.4 external camera device");
+ deviceImpl =
+ new device::V3_4::implementation::ExternalCameraDevice(cameraDevicePath, mCfg);
+ break;
+ }
+ case 5: {
+ ALOGV("Constructing v3.5 external camera device");
+ deviceImpl =
+ new device::V3_5::implementation::ExternalCameraDevice(cameraDevicePath, mCfg);
+ break;
+ }
+ case 6: {
+ ALOGV("Constructing v3.6 external camera device");
+ deviceImpl =
+ new device::V3_6::implementation::ExternalCameraDevice(cameraDevicePath, mCfg);
+ break;
+ }
+ default:
+ ALOGE("%s: Unknown HAL minor version %d!", __FUNCTION__, mPreferredHal3MinorVersion);
+ _hidl_cb(Status::INTERNAL_ERROR, nullptr);
+ return Void();
+ }
+
+ if (deviceImpl == nullptr || deviceImpl->isInitFailed()) {
+ ALOGE("%s: camera device %s init failed!", __FUNCTION__, cameraDevicePath.c_str());
+ _hidl_cb(Status::INTERNAL_ERROR, nullptr);
+ return Void();
+ }
+
+ IF_ALOGV() {
+ deviceImpl->getInterface()->interfaceChain(
+ [](::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
+ ALOGV("Device interface chain:");
+ for (auto iface : interfaceChain) {
+ ALOGV(" %s", iface.c_str());
+ }
+ });
+ }
+
+ _hidl_cb(Status::OK, deviceImpl->getInterface());
+
+ return Void();
+}
+
+void ExternalCameraProviderImpl_2_7::addExternalCamera(const char* devName) {
+ ALOGV("ExtCam: adding %s to External Camera HAL!", devName);
+ Mutex::Autolock _l(mLock);
+ std::string deviceName;
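+ // Advertise the camera under the preferred device HAL minor version: device@3.x/external/<id>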
+ std::string cameraId =
+ std::to_string(mCfg.cameraIdOffset + std::atoi(devName + kDevicePrefixLen));
+ if (mPreferredHal3MinorVersion == 6) {
+ deviceName = std::string("device@3.6/external/") + cameraId;
+ } else if (mPreferredHal3MinorVersion == 5) {
+ deviceName = std::string("device@3.5/external/") + cameraId;
+ } else {
+ deviceName = std::string("device@3.4/external/") + cameraId;
+ }
+ mCameraStatusMap[deviceName] = CameraDeviceStatus::PRESENT;
+ if (mCallbacks != nullptr) {
+ mCallbacks->cameraDeviceStatusChange(deviceName, CameraDeviceStatus::PRESENT);
+ }
+}
+
+void ExternalCameraProviderImpl_2_7::deviceAdded(const char* devName) {
+ {
+ base::unique_fd fd(::open(devName, O_RDWR));
+ if (fd.get() < 0) {
+ ALOGE("%s open v4l2 device %s failed:%s", __FUNCTION__, devName, strerror(errno));
+ return;
+ }
+
+ struct v4l2_capability capability;
+ int ret = ioctl(fd.get(), VIDIOC_QUERYCAP, &capability);
+ if (ret < 0) {
+ ALOGE("%s v4l2 QUERYCAP %s failed", __FUNCTION__, devName);
+ return;
+ }
+
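+ // Only treat v4l2 nodes that can capture video as candidate external cameras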
+ if (!(capability.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
+ ALOGW("%s device %s does not support VIDEO_CAPTURE", __FUNCTION__, devName);
+ return;
+ }
+ }
+ // See if we can initialize ExternalCameraDevice correctly
+ sp<device::V3_4::implementation::ExternalCameraDevice> deviceImpl =
+ new device::V3_4::implementation::ExternalCameraDevice(devName, mCfg);
+ if (deviceImpl == nullptr || deviceImpl->isInitFailed()) {
+ ALOGW("%s: Attempt to init camera device %s failed!", __FUNCTION__, devName);
+ return;
+ }
+ deviceImpl.clear();
+
+ addExternalCamera(devName);
+ return;
+}
+
+void ExternalCameraProviderImpl_2_7::deviceRemoved(const char* devName) {
+ Mutex::Autolock _l(mLock);
+ std::string deviceName;
+ std::string cameraId =
+ std::to_string(mCfg.cameraIdOffset + std::atoi(devName + kDevicePrefixLen));
+ if (mPreferredHal3MinorVersion == 6) {
+ deviceName = std::string("device@3.6/external/") + cameraId;
+ } else if (mPreferredHal3MinorVersion == 5) {
+ deviceName = std::string("device@3.5/external/") + cameraId;
+ } else {
+ deviceName = std::string("device@3.4/external/") + cameraId;
+ }
+ if (mCameraStatusMap.find(deviceName) != mCameraStatusMap.end()) {
+ mCameraStatusMap.erase(deviceName);
+ if (mCallbacks != nullptr) {
+ mCallbacks->cameraDeviceStatusChange(deviceName, CameraDeviceStatus::NOT_PRESENT);
+ }
+ } else {
+ ALOGE("%s: cannot find camera device %s", __FUNCTION__, devName);
+ }
+}
+
+ExternalCameraProviderImpl_2_7::HotplugThread::HotplugThread(ExternalCameraProviderImpl_2_7* parent)
+ : Thread(/*canCallJava*/ false),
+ mParent(parent),
+ mInternalDevices(parent->mCfg.mInternalDevices) {}
+
+ExternalCameraProviderImpl_2_7::HotplugThread::~HotplugThread() {}
+
+bool ExternalCameraProviderImpl_2_7::HotplugThread::threadLoop() {
+ // Update existing cameras
+ mParent->updateAttachedCameras();
+
+ // Watch new video devices
+ mINotifyFD = inotify_init();
+ if (mINotifyFD < 0) {
+ ALOGE("%s: inotify init failed! Exiting threadloop", __FUNCTION__);
+ return true;
+ }
+
+ mWd = inotify_add_watch(mINotifyFD, kDevicePath, IN_CREATE | IN_DELETE);
+ if (mWd < 0) {
+ ALOGE("%s: inotify add watch failed! Exiting threadloop", __FUNCTION__);
+ return true;
+ }
+
+ bool done = false;
+ char eventBuf[512];
+ while (!done) {
+ int offset = 0;
+ int ret = read(mINotifyFD, eventBuf, sizeof(eventBuf));
+ if (ret >= (int)sizeof(struct inotify_event)) {
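+ // inotify events are variable length (header plus name), so walk the buffer by event->len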
+ while (offset < ret) {
+ struct inotify_event* event = (struct inotify_event*)&eventBuf[offset];
+ if (event->wd == mWd) {
+ ALOGV("%s inotify_event %s", __FUNCTION__, event->name);
+ if (!strncmp(kPrefix, event->name, kPrefixLen)) {
+ std::string deviceId(event->name + kPrefixLen);
+ if (mInternalDevices.count(deviceId) == 0) {
+ char v4l2DevicePath[kMaxDevicePathLen];
+ snprintf(v4l2DevicePath, kMaxDevicePathLen, "%s%s", kDevicePath,
+ event->name);
+ if (event->mask & IN_CREATE) {
+ mParent->deviceAdded(v4l2DevicePath);
+ }
+ if (event->mask & IN_DELETE) {
+ mParent->deviceRemoved(v4l2DevicePath);
+ }
+ }
+ }
+ }
+ offset += sizeof(struct inotify_event) + event->len;
+ }
+ }
+ }
+
+ return true;
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::notifyDeviceStateChange(
+ hidl_bitfield<DeviceState> /*newState*/) {
+ return Void();
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::getConcurrentStreamingCameraIds(
+ ICameraProvider::getConcurrentStreamingCameraIds_cb _hidl_cb) {
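+ // External cameras do not support concurrent streaming, so no combinations are reported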
+ hidl_vec<hidl_vec<hidl_string>> hidl_camera_id_combinations;
+ _hidl_cb(Status::OK, hidl_camera_id_combinations);
+ return Void();
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::isConcurrentStreamCombinationSupported(
+ const hidl_vec<::android::hardware::camera::provider::V2_6::
+ CameraIdAndStreamCombination>& /* configs */,
+ ICameraProvider::isConcurrentStreamCombinationSupported_cb _hidl_cb) {
+ _hidl_cb(Status::OK, false);
+ return Void();
+}
+
+Return<void> ExternalCameraProviderImpl_2_7::isConcurrentStreamCombinationSupported_2_7(
+ const hidl_vec<CameraIdAndStreamCombination>& /* configs */,
+ ICameraProvider::isConcurrentStreamCombinationSupported_2_7_cb _hidl_cb) {
+ _hidl_cb(Status::OK, false);
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V2_7
+} // namespace provider
+} // namespace camera
+} // namespace hardware
+} // namespace android
\ No newline at end of file
diff --git a/camera/provider/2.7/default/ExternalCameraProviderImpl_2_7.h b/camera/provider/2.7/default/ExternalCameraProviderImpl_2_7.h
new file mode 100644
index 0000000..da9f6b3
--- /dev/null
+++ b/camera/provider/2.7/default/ExternalCameraProviderImpl_2_7.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA_PROVIDER_V2_7_EXTCAMERAPROVIDER_H
+#define ANDROID_HARDWARE_CAMERA_PROVIDER_V2_7_EXTCAMERAPROVIDER_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include "ExternalCameraUtils.h"
+
+#include <android/hardware/camera/provider/2.6/ICameraProviderCallback.h>
+#include <android/hardware/camera/provider/2.7/ICameraProvider.h>
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace provider {
+namespace V2_7 {
+namespace implementation {
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::camera::common::V1_0::CameraDeviceStatus;
+using ::android::hardware::camera::common::V1_0::Status;
+using ::android::hardware::camera::common::V1_0::VendorTagSection;
+using ::android::hardware::camera::external::common::ExternalCameraConfig;
+using ::android::hardware::camera::provider::V2_4::ICameraProviderCallback;
+using ::android::hardware::camera::provider::V2_5::DeviceState;
+using ::android::hardware::camera::provider::V2_7::CameraIdAndStreamCombination;
+using ::android::hardware::camera::provider::V2_7::ICameraProvider;
+using ::android::hidl::base::V1_0::IBase;
+
+/**
+ * The implementation of external webcam CameraProvider 2.7, separated
+ * from the HIDL interface layer to allow for implementation reuse by later
+ * provider versions.
+ *
+ * This camera provider supports standard UVC webcameras via the Linux V4L2
+ * UVC driver.
+ */
+struct ExternalCameraProviderImpl_2_7 {
+ ExternalCameraProviderImpl_2_7();
+ ~ExternalCameraProviderImpl_2_7();
+
+ // Caller must use this method to check if CameraProvider ctor failed
+ bool isInitFailed() { return false; }
+
+ // Methods from ::android::hardware::camera::provider::V2_4::ICameraProvider follow.
+ Return<Status> setCallback(const sp<ICameraProviderCallback>& callback);
+ Return<void> getVendorTags(ICameraProvider::getVendorTags_cb _hidl_cb);
+ Return<void> getCameraIdList(ICameraProvider::getCameraIdList_cb _hidl_cb);
+ Return<void> isSetTorchModeSupported(ICameraProvider::isSetTorchModeSupported_cb _hidl_cb);
+ Return<void> getCameraDeviceInterface_V1_x(const hidl_string&,
+ ICameraProvider::getCameraDeviceInterface_V1_x_cb);
+ Return<void> getCameraDeviceInterface_V3_x(const hidl_string&,
+ ICameraProvider::getCameraDeviceInterface_V3_x_cb);
+
+ // Methods from ::android::hardware::camera::provider::V2_5::ICameraProvider follow.
+ Return<void> notifyDeviceStateChange(hidl_bitfield<DeviceState> newState);
+
+ // Methods from ::android::hardware::camera::provider::V2_7::ICameraProvider follow.
+ Return<void> getConcurrentStreamingCameraIds(
+ ICameraProvider::getConcurrentStreamingCameraIds_cb _hidl_cb);
+
+ Return<void> isConcurrentStreamCombinationSupported(
+ const hidl_vec<
+ ::android::hardware::camera::provider::V2_6::CameraIdAndStreamCombination>&
+ configs,
+ ICameraProvider::isConcurrentStreamCombinationSupported_cb _hidl_cb);
+
+ Return<void> isConcurrentStreamCombinationSupported_2_7(
+ const hidl_vec<CameraIdAndStreamCombination>& configs,
+ ICameraProvider::isConcurrentStreamCombinationSupported_2_7_cb _hidl_cb);
+
+ private:
+ void addExternalCamera(const char* devName);
+
+ void deviceAdded(const char* devName);
+
+ void deviceRemoved(const char* devName);
+
+ void updateAttachedCameras();
+
+ class HotplugThread : public android::Thread {
+ public:
+ HotplugThread(ExternalCameraProviderImpl_2_7* parent);
+ ~HotplugThread();
+
+ virtual bool threadLoop() override;
+
+ private:
+ ExternalCameraProviderImpl_2_7* mParent = nullptr;
+ const std::unordered_set<std::string> mInternalDevices;
+
+ int mINotifyFD = -1;
+ int mWd = -1;
+ };
+
+ Mutex mLock;
+ sp<ICameraProviderCallback> mCallbacks = nullptr;
+ std::unordered_map<std::string, CameraDeviceStatus> mCameraStatusMap; // camera id -> status
+ const ExternalCameraConfig mCfg;
+ sp<HotplugThread> mHotPlugThread;
+ int mPreferredHal3MinorVersion;
+};
+
+} // namespace implementation
+} // namespace V2_7
+} // namespace provider
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_CAMERA_PROVIDER_V2_7_EXTCAMERAPROVIDER_H
diff --git a/camera/provider/2.7/default/android.hardware.camera.provider@2.7-external-service-lazy.rc b/camera/provider/2.7/default/android.hardware.camera.provider@2.7-external-service-lazy.rc
new file mode 100644
index 0000000..9292c4f
--- /dev/null
+++ b/camera/provider/2.7/default/android.hardware.camera.provider@2.7-external-service-lazy.rc
@@ -0,0 +1,13 @@
+service vendor.camera-provider-2-7-ext /vendor/bin/hw/android.hardware.camera.provider@2.7-external-service-lazy
+ interface android.hardware.camera.provider@2.4::ICameraProvider external/0
+ interface android.hardware.camera.provider@2.5::ICameraProvider external/0
+ interface android.hardware.camera.provider@2.6::ICameraProvider external/0
+ interface android.hardware.camera.provider@2.7::ICameraProvider external/0
+ class hal
+ oneshot
+ disabled
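+ # Lazy HAL: stays disabled at boot and is started on demand when a client requests the interface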
+ user cameraserver
+ group audio camera input drmrpc usb
+ ioprio rt 4
+ capabilities SYS_NICE
+ task_profiles CameraServiceCapacity MaxPerformance
\ No newline at end of file
diff --git a/camera/provider/2.7/default/android.hardware.camera.provider@2.7-external-service.rc b/camera/provider/2.7/default/android.hardware.camera.provider@2.7-external-service.rc
new file mode 100644
index 0000000..2c9b782
--- /dev/null
+++ b/camera/provider/2.7/default/android.hardware.camera.provider@2.7-external-service.rc
@@ -0,0 +1,11 @@
+service vendor.camera-provider-2-7-ext /vendor/bin/hw/android.hardware.camera.provider@2.7-external-service
+ interface android.hardware.camera.provider@2.4::ICameraProvider external/0
+ interface android.hardware.camera.provider@2.5::ICameraProvider external/0
+ interface android.hardware.camera.provider@2.6::ICameraProvider external/0
+ interface android.hardware.camera.provider@2.7::ICameraProvider external/0
+ class hal
+ user cameraserver
+ group audio camera input drmrpc usb
+ ioprio rt 4
+ capabilities SYS_NICE
+ task_profiles CameraServiceCapacity MaxPerformance
diff --git a/camera/provider/2.7/default/external-service.cpp b/camera/provider/2.7/default/external-service.cpp
new file mode 100644
index 0000000..90b8239
--- /dev/null
+++ b/camera/provider/2.7/default/external-service.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef LAZY_SERVICE
+#define LOG_TAG "android.hardware.camera.provider@2.7-external-service-lazy"
+#else
+#define LOG_TAG "android.hardware.camera.provider@2.7-external-service"
+#endif
+
+#include <android/hardware/camera/provider/2.7/ICameraProvider.h>
+#include <binder/ProcessState.h>
+#include <hidl/HidlLazyUtils.h>
+#include <hidl/HidlTransportSupport.h>
+
+#include "CameraProvider_2_7.h"
+#include "ExternalCameraProviderImpl_2_7.h"
+
+using android::status_t;
+using android::hardware::camera::provider::V2_7::ICameraProvider;
+using android::hidl::base::V1_0::IBase;
+
+#ifdef LAZY_SERVICE
+const bool kLazyService = true;
+#else
+const bool kLazyService = false;
+#endif
+
+int main() {
+ using namespace android::hardware::camera::provider::V2_7::implementation;
+
+ ALOGI("CameraProvider@2.7 external webcam service is starting.");
+
+ ::android::hardware::configureRpcThreadpool(/*threads*/ HWBINDER_THREAD_COUNT,
+ /*willJoin*/ true);
+
+ ::android::sp<CameraProvider<ExternalCameraProviderImpl_2_7>> provider =
+ new CameraProvider<ExternalCameraProviderImpl_2_7>();
+
+ status_t status;
+ if (kLazyService) {
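+ // Lazy services register via LazyServiceRegistrar so the process can be started
+ // on demand and shut down when it has no clients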
+ auto serviceRegistrar = ::android::hardware::LazyServiceRegistrar::getInstance();
+ status = serviceRegistrar.registerService(provider, "external/0");
+ } else {
+ status = provider->registerAsService("external/0");
+ }
+
+ LOG_ALWAYS_FATAL_IF(status != android::OK, "Error while registering provider service: %d",
+ status);
+
+ ::android::hardware::joinRpcThreadpool();
+
+ return 0;
+}
\ No newline at end of file
diff --git a/common/support/Android.bp b/common/support/Android.bp
index 718901e..12ab1f7 100644
--- a/common/support/Android.bp
+++ b/common/support/Android.bp
@@ -25,6 +25,7 @@
apex_available: [
"//apex_available:platform",
"com.android.neuralnetworks",
+ "com.android.media.swcodec",
],
min_sdk_version: "29",
}
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 761a962..1731c9c 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -60,6 +60,14 @@
<regex-instance>.*</regex-instance>
</interface>
</hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.automotive.evs</name>
+ <interface>
+ <name>IEvsEnumerator</name>
+ <instance>default</instance>
+ <regex-instance>[a-z]+/[0-9]+</regex-instance>
+ </interface>
+ </hal>
<hal format="hidl" optional="true">
<name>android.hardware.automotive.evs</name>
<version>1.0-1</version>
@@ -284,6 +292,14 @@
<instance>default</instance>
</interface>
</hal>
+ <hal format="aidl" optional="true">
+ <name>android.hardware.graphics.allocator</name>
+ <version>1</version>
+ <interface>
+ <name>IAllocator</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
<hal format="hidl" optional="false">
<name>android.hardware.graphics.composer</name>
<version>2.1-4</version>
diff --git a/contexthub/aidl/aidl_api/android.hardware.contexthub/current/android/hardware/contexthub/Setting.aidl b/contexthub/aidl/aidl_api/android.hardware.contexthub/current/android/hardware/contexthub/Setting.aidl
index 41bc9ae..d998478 100644
--- a/contexthub/aidl/aidl_api/android.hardware.contexthub/current/android/hardware/contexthub/Setting.aidl
+++ b/contexthub/aidl/aidl_api/android.hardware.contexthub/current/android/hardware/contexthub/Setting.aidl
@@ -39,4 +39,6 @@
WIFI_SCANNING = 3,
AIRPLANE_MODE = 4,
MICROPHONE = 5,
+ BT_MAIN = 6,
+ BT_SCANNING = 7,
}
diff --git a/contexthub/aidl/android/hardware/contexthub/Setting.aidl b/contexthub/aidl/android/hardware/contexthub/Setting.aidl
index f2e55db..91d4c3f 100644
--- a/contexthub/aidl/android/hardware/contexthub/Setting.aidl
+++ b/contexthub/aidl/android/hardware/contexthub/Setting.aidl
@@ -39,4 +39,12 @@
* by CHRE.
*/
MICROPHONE,
+ /**
+ * The main BT toggle in the Android settings for BT connectivity.
+ */
+ BT_MAIN,
+ /**
+ * The "BT scanning" setting for location scans.
+ */
+ BT_SCANNING,
}
diff --git a/contexthub/aidl/vts/VtsAidlHalContextHubTargetTest.cpp b/contexthub/aidl/vts/VtsAidlHalContextHubTargetTest.cpp
index a47f64e..f0583be 100644
--- a/contexthub/aidl/vts/VtsAidlHalContextHubTargetTest.cpp
+++ b/contexthub/aidl/vts/VtsAidlHalContextHubTargetTest.cpp
@@ -300,6 +300,14 @@
testSettingChanged(Setting::MICROPHONE);
}
+TEST_P(ContextHubAidl, TestOnBtMainSettingChanged) {
+ testSettingChanged(Setting::BT_MAIN);
+}
+
+TEST_P(ContextHubAidl, TestOnBtScanningSettingChanged) {
+ testSettingChanged(Setting::BT_SCANNING);
+}
+
std::vector<std::tuple<std::string, int32_t>> generateContextHubMapping() {
std::vector<std::tuple<std::string, int32_t>> tuples;
auto contextHubAidlNames = android::getAidlHalInstanceNames(IContextHub::descriptor);
diff --git a/graphics/allocator/aidl/Android.bp b/graphics/allocator/aidl/Android.bp
new file mode 100644
index 0000000..ea8a599
--- /dev/null
+++ b/graphics/allocator/aidl/Android.bp
@@ -0,0 +1,40 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+aidl_interface {
+ name: "android.hardware.graphics.allocator",
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ support_system_process: true,
+ },
+ srcs: ["android/hardware/graphics/allocator/*.aidl"],
+ imports: [
+ "android.hardware.common-V2",
+ ],
+ stability: "vintf",
+ backend: {
+ cpp: {
+ enabled: false,
+ },
+ java: {
+ enabled: false,
+ },
+ ndk: {
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.media.swcodec",
+ ],
+ vndk: {
+ enabled: true,
+ },
+ min_sdk_version: "29",
+ },
+ },
+}
diff --git a/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/AllocationError.aidl b/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/AllocationError.aidl
new file mode 100644
index 0000000..6e7b739
--- /dev/null
+++ b/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/AllocationError.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.graphics.allocator;
+@Backing(type="int") @VintfStability
+enum AllocationError {
+ BAD_DESCRIPTOR = 0,
+ NO_RESOURCES = 1,
+ UNSUPPORTED = 2,
+}
diff --git a/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/AllocationResult.aidl b/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/AllocationResult.aidl
new file mode 100644
index 0000000..73cfeb5
--- /dev/null
+++ b/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/AllocationResult.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.graphics.allocator;
+@VintfStability
+parcelable AllocationResult {
+ int stride;
+ android.hardware.common.NativeHandle[] buffers;
+}
diff --git a/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/IAllocator.aidl b/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/IAllocator.aidl
new file mode 100644
index 0000000..fe0b0a2
--- /dev/null
+++ b/graphics/allocator/aidl/aidl_api/android.hardware.graphics.allocator/current/android/hardware/graphics/allocator/IAllocator.aidl
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.graphics.allocator;
+@VintfStability
+interface IAllocator {
+ android.hardware.graphics.allocator.AllocationResult allocate(in byte[] descriptor, in int count);
+}
diff --git a/graphics/allocator/aidl/android/hardware/graphics/allocator/AllocationError.aidl b/graphics/allocator/aidl/android/hardware/graphics/allocator/AllocationError.aidl
new file mode 100644
index 0000000..c6b77b9
--- /dev/null
+++ b/graphics/allocator/aidl/android/hardware/graphics/allocator/AllocationError.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.graphics.allocator;
+
+@VintfStability
+@Backing(type="int")
+enum AllocationError {
+ /**
+ * Invalid BufferDescriptor.
+ */
+ BAD_DESCRIPTOR,
+
+ /**
+ * Resource unavailable.
+ */
+ NO_RESOURCES,
+
+ /**
+ * Permanent failure.
+ */
+ UNSUPPORTED
+}
\ No newline at end of file
diff --git a/graphics/allocator/aidl/android/hardware/graphics/allocator/AllocationResult.aidl b/graphics/allocator/aidl/android/hardware/graphics/allocator/AllocationResult.aidl
new file mode 100644
index 0000000..0774e25
--- /dev/null
+++ b/graphics/allocator/aidl/android/hardware/graphics/allocator/AllocationResult.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.graphics.allocator;
+
+import android.hardware.common.NativeHandle;
+
+ /**
+ * Result of an IAllocator::allocate call.
+ *
+ * @sa +ndk libnativewindow#AHardwareBuffer_Desc
+ */
+@VintfStability
+parcelable AllocationResult {
+ int stride;
+ NativeHandle[] buffers;
+}
\ No newline at end of file
diff --git a/graphics/allocator/aidl/android/hardware/graphics/allocator/IAllocator.aidl b/graphics/allocator/aidl/android/hardware/graphics/allocator/IAllocator.aidl
new file mode 100644
index 0000000..8c3ca96
--- /dev/null
+++ b/graphics/allocator/aidl/android/hardware/graphics/allocator/IAllocator.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.graphics.allocator;
+
+import android.hardware.graphics.allocator.AllocationResult;
+
+@VintfStability
+interface IAllocator {
+ /**
+ * Allocates buffers with the properties specified by the descriptor.
+ *
+ * Allocations should be optimized for usage bits provided in the
+ * descriptor.
+ *
+ * @param descriptor Properties of the buffers to allocate. This must be
+ * obtained from IMapper::createDescriptor().
+ * @param count The number of buffers to allocate.
+ * @return An AllocationResult containing the result of an error, or
+ * an AllocationError status
+ */
+ AllocationResult allocate(in byte[] descriptor, in int count);
+}
diff --git a/graphics/common/aidl/aidl_api/android.hardware.graphics.common/current/android/hardware/graphics/common/BufferUsage.aidl b/graphics/common/aidl/aidl_api/android.hardware.graphics.common/current/android/hardware/graphics/common/BufferUsage.aidl
index b4ef451..e1edb17 100644
--- a/graphics/common/aidl/aidl_api/android.hardware.graphics.common/current/android/hardware/graphics/common/BufferUsage.aidl
+++ b/graphics/common/aidl/aidl_api/android.hardware.graphics.common/current/android/hardware/graphics/common/BufferUsage.aidl
@@ -54,10 +54,11 @@
RENDERSCRIPT = 1048576,
VIDEO_DECODER = 4194304,
SENSOR_DIRECT_DATA = 8388608,
+ GPU_DATA_BUFFER = 16777216,
GPU_CUBE_MAP = 33554432,
GPU_MIPMAP_COMPLETE = 67108864,
HW_IMAGE_ENCODER = 134217728,
- GPU_DATA_BUFFER = 16777216,
+ FRONT_BUFFER = 4294967296,
VENDOR_MASK = -268435456,
VENDOR_MASK_HI = -281474976710656,
}
diff --git a/graphics/common/aidl/android/hardware/graphics/common/BufferUsage.aidl b/graphics/common/aidl/android/hardware/graphics/common/BufferUsage.aidl
index d978f46..4b5a306 100644
--- a/graphics/common/aidl/android/hardware/graphics/common/BufferUsage.aidl
+++ b/graphics/common/aidl/android/hardware/graphics/common/BufferUsage.aidl
@@ -87,6 +87,12 @@
/** buffer is used as a sensor direct report output */
SENSOR_DIRECT_DATA = 1 << 23,
+ /**
+ * buffer is used as an OpenGL shader storage or uniform
+ * buffer object
+ */
+ GPU_DATA_BUFFER = 1 << 24,
+
/** buffer is used as a cube map texture */
GPU_CUBE_MAP = 1 << 25,
@@ -98,17 +104,17 @@
*/
HW_IMAGE_ENCODER = 1 << 27,
- /**
- * buffer is used as as an OpenGL shader storage or uniform
- * buffer object
- */
- GPU_DATA_BUFFER = 1 << 24,
+ /* Bits 28-31 are reserved for vendor usage */
- /** bits 25-27 must be zero and are reserved for future versions */
+ /**
+ * Buffer is used for front-buffer rendering
+ */
+ FRONT_BUFFER = 1L << 32,
+
/** bits 28-31 are reserved for vendor extensions */
VENDOR_MASK = 0xf << 28,
- /** bits 32-47 must be zero and are reserved for future versions */
+ /** bits 33-47 must be zero and are reserved for future versions */
/** bits 48-63 are reserved for vendor extensions */
VENDOR_MASK_HI = (1L * 0xffff) << 48,
}
diff --git a/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp b/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp
index 1cfd3f9..1c75749 100644
--- a/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp
+++ b/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp
@@ -76,7 +76,10 @@
ASSERT_NE(binder, nullptr);
ASSERT_NO_FATAL_FAILURE(mComposer = IComposer::fromBinder(binder));
ASSERT_NE(mComposer, nullptr);
- ASSERT_NO_FATAL_FAILURE(mComposer->createClient(&mComposerClient));
+
+ ndk::ScopedAStatus status;
+ ASSERT_NO_FATAL_FAILURE(status = mComposer->createClient(&mComposerClient));
+ ASSERT_TRUE(status.isOk());
mComposerCallback = ::ndk::SharedRefBase::make<GraphicsComposerCallback>();
EXPECT_TRUE(mComposerClient->registerCallback(mComposerCallback).isOk());
@@ -1538,7 +1541,7 @@
execute();
const auto errors = mReader.takeErrors();
- if (errors.size() == 1 && errors[0].errorCode == EX_UNSUPPORTED_OPERATION) {
+ if (errors.size() == 1 && errors[0].errorCode == IComposerClient::EX_UNSUPPORTED) {
GTEST_SUCCEED() << "setLayerColorTransform is not supported";
return;
}
@@ -1555,7 +1558,7 @@
execute();
const auto errors = mReader.takeErrors();
EXPECT_EQ(1, errors.size());
- EXPECT_EQ(EX_UNSUPPORTED_OPERATION, errors[0].errorCode);
+ EXPECT_EQ(IComposerClient::EX_UNSUPPORTED, errors[0].errorCode);
GTEST_SUCCEED() << "SetDisplayBrightness is not supported";
return;
}
@@ -2024,6 +2027,27 @@
EXPECT_TRUE(mComposerClient->destroyLayer(mPrimaryDisplay, layer).isOk());
}
+TEST_P(GraphicsComposerAidlCommandTest, setLayerWhitePointNits) {
+ int64_t layer;
+ EXPECT_TRUE(mComposerClient->createLayer(mPrimaryDisplay, kBufferSlotCount, &layer).isOk());
+
+ mWriter.setLayerWhitePointNits(mPrimaryDisplay, layer, 200.f);
+ execute();
+ ASSERT_TRUE(mReader.takeErrors().empty());
+
+ mWriter.setLayerWhitePointNits(mPrimaryDisplay, layer, 1000.f);
+ execute();
+ ASSERT_TRUE(mReader.takeErrors().empty());
+
+ mWriter.setLayerWhitePointNits(mPrimaryDisplay, layer, 0.f);
+ execute();
+ ASSERT_TRUE(mReader.takeErrors().empty());
+
+ mWriter.setLayerWhitePointNits(mPrimaryDisplay, layer, -1.f);
+ execute();
+ ASSERT_TRUE(mReader.takeErrors().empty());
+}
+
TEST_P(GraphicsComposerAidlCommandTest, setActiveConfigWithConstraints) {
Test_setActiveConfigWithConstraints({.delayForChange = 0, .refreshMiss = false});
}
diff --git a/graphics/mapper/4.0/vts/functional/VtsHalGraphicsMapperV4_0TargetTest.cpp b/graphics/mapper/4.0/vts/functional/VtsHalGraphicsMapperV4_0TargetTest.cpp
index 2ab9c01..9371154 100644
--- a/graphics/mapper/4.0/vts/functional/VtsHalGraphicsMapperV4_0TargetTest.cpp
+++ b/graphics/mapper/4.0/vts/functional/VtsHalGraphicsMapperV4_0TargetTest.cpp
@@ -21,6 +21,7 @@
#include <thread>
#include <vector>
+#include <aidl/android/hardware/graphics/common/PixelFormat.h>
#include <aidl/android/hardware/graphics/common/PlaneLayoutComponentType.h>
#include <android-base/logging.h>
@@ -1205,6 +1206,40 @@
}
/**
+ * Test IMapper::isSupported with optional format R_8
+ */
+TEST_P(GraphicsMapperHidlTest, IsSupportedR8) {
+ auto info = mDummyDescriptorInfo;
+ info.format = static_cast<android::hardware::graphics::common::V1_2::PixelFormat>(
+ aidl::android::hardware::graphics::common::PixelFormat::R_8);
+ bool supported = false;
+
+ ASSERT_NO_FATAL_FAILURE(supported = mGralloc->isSupported(info));
+
+ if (!supported) {
+ GTEST_SUCCEED() << "R_8 is optional; unsupported so skipping allocation test";
+ return;
+ }
+
+ BufferDescriptor descriptor;
+ ASSERT_NO_FATAL_FAILURE(descriptor = mGralloc->createDescriptor(info));
+
+ constexpr uint32_t count = 1;
+ std::vector<const native_handle_t*> bufferHandles;
+ uint32_t stride;
+ ASSERT_NO_FATAL_FAILURE(bufferHandles =
+ mGralloc->allocate(descriptor, count, false,
+ Tolerance::kToleranceStrict, &stride));
+
+ EXPECT_LE(info.width, stride) << "invalid buffer stride";
+ EXPECT_EQ(1u, bufferHandles.size());
+
+ for (auto bufferHandle : bufferHandles) {
+ mGralloc->freeBuffer(bufferHandle);
+ }
+}
+
+/**
* Test IMapper::get(BufferId)
*/
TEST_P(GraphicsMapperHidlTest, GetBufferId) {
diff --git a/keymaster/3.0/vts/functional/Android.bp b/keymaster/3.0/vts/functional/Android.bp
index e2ae803..39bec3f 100644
--- a/keymaster/3.0/vts/functional/Android.bp
+++ b/keymaster/3.0/vts/functional/Android.bp
@@ -38,5 +38,11 @@
"libcrypto_static",
"libsoftkeymasterdevice",
],
- test_suites: ["general-tests", "vts"],
+ test_suites: [
+ "general-tests",
+ "vts",
+ ],
+ sanitize: {
+ cfi: false,
+ },
}
diff --git a/keymaster/4.1/vts/functional/Android.bp b/keymaster/4.1/vts/functional/Android.bp
index c650bec..547ce38 100644
--- a/keymaster/4.1/vts/functional/Android.bp
+++ b/keymaster/4.1/vts/functional/Android.bp
@@ -48,4 +48,7 @@
"general-tests",
"vts",
],
+ sanitize: {
+ cfi: false,
+ },
}
diff --git a/neuralnetworks/1.0/utils/Android.bp b/neuralnetworks/1.0/utils/Android.bp
index 31cdded..ad30e30 100644
--- a/neuralnetworks/1.0/utils/Android.bp
+++ b/neuralnetworks/1.0/utils/Android.bp
@@ -31,16 +31,11 @@
export_include_dirs: ["include"],
cflags: ["-Wthread-safety"],
static_libs: [
+ "android.hardware.neuralnetworks@1.0",
"libarect",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
],
- shared_libs: [
- "android.hardware.neuralnetworks@1.0",
- ],
- export_static_lib_headers: [
- "neuralnetworks_utils_hal_common",
- ],
target: {
android: {
shared_libs: ["libnativewindow"],
@@ -55,19 +50,14 @@
static_libs: [
"android.hardware.neuralnetworks@1.0",
"libgmock",
- "libneuralnetworks_common",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
"neuralnetworks_utils_hal_1_0",
],
shared_libs: [
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
"libbase",
"libcutils",
- "libfmq",
"libhidlbase",
- "libhidlmemory",
"liblog",
"libutils",
],
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
index 8bd2fbe..cef76c6 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
@@ -45,12 +45,15 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
private:
const nn::SharedPreparedModel kPreparedModel;
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index 0a6ca3e..d7c43ef 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -65,8 +65,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index bdb5b54..337c132 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -49,18 +49,23 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
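execute, executeFenced, and createReusableExecution all gain the same two trailing parameters. A hedged sketch of a caller with no vendor hints, assuming a canonical nn::SharedPreparedModel named preparedModel and an nn::Request named request:

    // Sketch only: synchronous execution with no timing measurement, no deadline, and no
    // vendor hints; the empty vectors satisfy the two new trailing parameters.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> run(
            const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
        const std::vector<nn::TokenValuePair> hints;
        const std::vector<nn::ExtensionNameAndPrefix> extensionNameToPrefix;
        return preparedModel->execute(request, nn::MeasureTiming::NO, /*deadline=*/{},
                                      /*loopTimeoutDuration=*/{}, hints, extensionNameToPrefix);
    }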
diff --git a/neuralnetworks/1.0/utils/src/Burst.cpp b/neuralnetworks/1.0/utils/src/Burst.cpp
index 1284721..3642bc6 100644
--- a/neuralnetworks/1.0/utils/src/Burst.cpp
+++ b/neuralnetworks/1.0/utils/src/Burst.cpp
@@ -50,15 +50,20 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
- return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
}
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
- return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+ return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
}
} // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index b0c236e..620d040 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -143,7 +143,9 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
- const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 00e7d22..b8055fc 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -59,7 +59,9 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming /*measure*/,
const nn::OptionalTimePoint& /*deadline*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -94,19 +96,22 @@
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& /*request*/,
- const std::vector<nn::SyncFence>& /*waitFor*/,
- nn::MeasureTiming /*measure*/,
- const nn::OptionalTimePoint& /*deadline*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/,
- const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(
+ const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+ nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
<< "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
}
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming /*measure*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
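On a 1.0 HAL service executeFenced still fails with GENERAL_FAILURE regardless of the new arguments, so a caller that prefers fenced execution needs an unfenced fallback. A sketch under that assumption (runPreferFenced and its arguments are illustrative, not part of this change):

    // Sketch only: prefer a fenced execution but fall back to plain execute when the driver,
    // like this 1.0 shim, rejects executeFenced with GENERAL_FAILURE.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> runPreferFenced(
            const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
        const auto fenced = preparedModel->executeFenced(
                request, /*waitFor=*/{}, nn::MeasureTiming::NO, /*deadline=*/{},
                /*loopTimeoutDuration=*/{}, /*timeoutDurationAfterFence=*/{}, /*hints=*/{},
                /*extensionNameToPrefix=*/{});
        if (!fenced.has_value()) {
            return preparedModel->execute(request, nn::MeasureTiming::NO, /*deadline=*/{},
                                          /*loopTimeoutDuration=*/{}, /*hints=*/{},
                                          /*extensionNameToPrefix=*/{});
        }
        // Fenced path succeeded; timing would come from the ExecuteFencedInfoCallback.
        return std::pair{std::vector<nn::OutputShape>{}, nn::Timing{}};
    }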
diff --git a/neuralnetworks/1.0/utils/test/DeviceTest.cpp b/neuralnetworks/1.0/utils/test/DeviceTest.cpp
index 83e555f..9e9db16 100644
--- a/neuralnetworks/1.0/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.0/utils/test/DeviceTest.cpp
@@ -380,7 +380,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -399,7 +399,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -417,7 +417,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -435,7 +435,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -452,7 +452,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -469,7 +469,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -488,7 +488,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
index 7820c06..e03a98d 100644
--- a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
@@ -121,7 +121,7 @@
.WillOnce(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
EXPECT_TRUE(result.has_value())
@@ -138,7 +138,7 @@
V1_0::ErrorStatus::GENERAL_FAILURE)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -155,7 +155,7 @@
makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -171,7 +171,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -187,7 +187,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -205,7 +205,7 @@
EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -218,7 +218,7 @@
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -235,7 +235,7 @@
.WillRepeatedly(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -258,7 +258,7 @@
V1_0::ErrorStatus::GENERAL_FAILURE)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -279,7 +279,7 @@
makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -299,7 +299,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -319,7 +319,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -341,7 +341,7 @@
EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -358,7 +358,7 @@
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
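The test updates above only widen the call arity; the mock expectations are unchanged because the underlying V1_0 HIDL call still takes two arguments. A fragment showing the resulting shape of such a test, assuming the MockPreparedModel fixture, makeExecute helper, and PreparedModel::create factory already used in this file:

    // Fragment only: the expectation stays on the two-argument HIDL execute, while the wrapper
    // call carries the two extra (ignored) empty hint arguments.
    EXPECT_CALL(*mockPreparedModel, execute(_, _))
            .Times(1)
            .WillOnce(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
    EXPECT_TRUE(result.has_value());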
diff --git a/neuralnetworks/1.1/utils/Android.bp b/neuralnetworks/1.1/utils/Android.bp
index 737ff58..4b8999f 100644
--- a/neuralnetworks/1.1/utils/Android.bp
+++ b/neuralnetworks/1.1/utils/Android.bp
@@ -31,17 +31,12 @@
export_include_dirs: ["include"],
cflags: ["-Wthread-safety"],
static_libs: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
"neuralnetworks_utils_hal_1_0",
],
- shared_libs: [
- "android.hardware.neuralnetworks@1.0",
- "android.hardware.neuralnetworks@1.1",
- ],
- export_static_lib_headers: [
- "neuralnetworks_utils_hal_common",
- ],
}
cc_test {
@@ -52,20 +47,15 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"libgmock",
- "libneuralnetworks_common",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
"neuralnetworks_utils_hal_1_0",
"neuralnetworks_utils_hal_1_1",
],
shared_libs: [
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
"libbase",
"libcutils",
- "libfmq",
"libhidlbase",
- "libhidlmemory",
"liblog",
"libutils",
],
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index d6bd36a..38ca138 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -64,8 +64,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 3effa84..28f3276 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -143,7 +143,9 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
- const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.1/utils/test/DeviceTest.cpp b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
index 2248da6..8ab87bc 100644
--- a/neuralnetworks/1.1/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
@@ -390,7 +390,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -409,7 +409,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -427,7 +427,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -445,7 +445,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -462,7 +462,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -479,7 +479,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -498,7 +498,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.2/utils/Android.bp b/neuralnetworks/1.2/utils/Android.bp
index 4eefb0f..4c5f065 100644
--- a/neuralnetworks/1.2/utils/Android.bp
+++ b/neuralnetworks/1.2/utils/Android.bp
@@ -31,19 +31,14 @@
export_include_dirs: ["include"],
cflags: ["-Wthread-safety"],
static_libs: [
- "neuralnetworks_types",
- "neuralnetworks_utils_hal_common",
- "neuralnetworks_utils_hal_1_0",
- "neuralnetworks_utils_hal_1_1",
- ],
- shared_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"libfmq",
- ],
- export_static_lib_headers: [
+ "neuralnetworks_types",
"neuralnetworks_utils_hal_common",
+ "neuralnetworks_utils_hal_1_0",
+ "neuralnetworks_utils_hal_1_1",
],
product_variables: {
debuggable: { // eng and userdebug builds
@@ -71,7 +66,6 @@
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"libgmock",
- "libneuralnetworks_common",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
"neuralnetworks_utils_hal_1_0",
@@ -79,13 +73,10 @@
"neuralnetworks_utils_hal_1_2",
],
shared_libs: [
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
"libbase",
"libcutils",
"libfmq",
"libhidlbase",
- "libhidlmemory",
"liblog",
"libutils",
],
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
index ac9411c..1b28476 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
@@ -170,13 +170,16 @@
// See IBurst::execute for information on this method.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
// See IBurst::createReusableExecution for information on this method.
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
// If fallback is not nullptr, this method will invoke the fallback function to try another
// execution path if the packet could not be sent. Otherwise, failing to send the packet will
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index c3348aa..4f13adc 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -37,7 +37,7 @@
GeneralResult<Operand::ExtraParams> unvalidatedConvert(
const hal::V1_2::Operand::ExtraParams& extraParams);
GeneralResult<Model> unvalidatedConvert(const hal::V1_2::Model& model);
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
GeneralResult<OutputShape> unvalidatedConvert(const hal::V1_2::OutputShape& outputShape);
GeneralResult<MeasureTiming> unvalidatedConvert(const hal::V1_2::MeasureTiming& measureTiming);
@@ -78,7 +78,7 @@
const nn::Operand::ExtraParams& extraParams);
nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
- const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
+ const nn::ExtensionNameAndPrefix& extensionNameAndPrefix);
nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
nn::GeneralResult<MeasureTiming> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
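ExtensionNameAndPrefix is promoted here from a type nested in Model to a standalone canonical type, so the canonical side of the conversion helpers changes spelling while the HIDL side keeps hal::V1_2::Model::ExtensionNameAndPrefix. Client code can then build the prefix table handed to the new prepareModel/execute parameters directly; a sketch with a hypothetical extension name and prefix value:

    // Sketch only: the extension prefix table for the new hints/extensionNameToPrefix
    // parameters uses the standalone canonical type; the entry below is hypothetical.
    const std::vector<nn::ExtensionNameAndPrefix> extensionNameToPrefix = {
            {.name = "com.example.sample_extension", .prefix = 0x1234},
    };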
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index e7ac172..d92cf50 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -83,8 +83,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 1150e5e..72a5b2f 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -49,18 +49,23 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
diff --git a/neuralnetworks/1.2/utils/src/Burst.cpp b/neuralnetworks/1.2/utils/src/Burst.cpp
index 911fbfa..23e8070 100644
--- a/neuralnetworks/1.2/utils/src/Burst.cpp
+++ b/neuralnetworks/1.2/utils/src/Burst.cpp
@@ -305,8 +305,9 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// This is the first point when we know an execution is occurring, so begin to collect
// systraces. Note that the first point we can begin collecting systraces in
// ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
@@ -317,7 +318,7 @@
// fall back to another execution path
if (!compliantVersion(request).ok()) {
// fallback to another execution path if the packet could not be sent
- return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {});
}
// ensure that request is ready for IPC
@@ -346,7 +347,7 @@
// send request packet
const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
- return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {});
};
return executeInternal(requestPacket, relocation, fallback);
}
@@ -354,14 +355,17 @@
// See IBurst::createReusableExecution for information on this method.
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution");
// if the request is valid but of a higher version than what's supported in burst execution,
// fall back to another execution path
if (!compliantVersion(request).ok()) {
// fallback to another execution path if the packet could not be sent
- return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+ return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, {},
+ {});
}
// ensure that request is ready for IPC
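The 1.2 burst protocol has no slot for the new arguments, so Burst::execute accepts them but drops them, and the prepared-model fallback above is invoked with empty vectors. A caller-side sketch, assuming a canonical nn::SharedBurst named burst plus request, hints, and extensionNameToPrefix built elsewhere:

    // Sketch only: a client may still pass hints to the burst; on this 1.2 FMQ path they are
    // dropped, and the prepared-model fallback likewise receives empty vectors.
    const auto result = burst->execute(request, nn::MeasureTiming::YES, /*deadline=*/{},
                                       /*loopTimeoutDuration=*/{}, hints, extensionNameToPrefix);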
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 838d9c4..62ec2ed 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -212,9 +212,9 @@
};
}
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
- return Model::ExtensionNameAndPrefix{
+ return ExtensionNameAndPrefix{
.name = extensionNameAndPrefix.name,
.prefix = extensionNameAndPrefix.prefix,
};
@@ -495,7 +495,7 @@
}
nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
- const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+ const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
return Model::ExtensionNameAndPrefix{
.name = extensionNameAndPrefix.name,
.prefix = extensionNameAndPrefix.prefix,
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index e7acecd..3a58d2c 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -236,7 +236,9 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index 6df3df3..feb3951 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -91,7 +91,9 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalTimePoint& /*deadline*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -123,19 +125,22 @@
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& /*request*/,
- const std::vector<nn::SyncFence>& /*waitFor*/,
- nn::MeasureTiming /*measure*/,
- const nn::OptionalTimePoint& /*deadline*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/,
- const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(
+ const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+ nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
<< "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
}
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
diff --git a/neuralnetworks/1.2/utils/test/DeviceTest.cpp b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
index 1dc6285..0d8c141 100644
--- a/neuralnetworks/1.2/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
@@ -636,7 +636,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -655,7 +655,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -673,7 +673,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -691,7 +691,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -708,7 +708,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -725,7 +725,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -746,7 +746,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
index 5e2ad79..a5ec9d3 100644
--- a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
@@ -154,7 +154,7 @@
.WillOnce(Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
EXPECT_TRUE(result.has_value())
@@ -172,7 +172,7 @@
makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -189,7 +189,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -206,7 +206,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -224,7 +224,7 @@
V1_0::ErrorStatus::NONE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
EXPECT_TRUE(result.has_value())
@@ -243,7 +243,7 @@
kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -261,7 +261,7 @@
V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -278,7 +278,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -295,7 +295,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -314,7 +314,7 @@
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -328,7 +328,7 @@
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -347,7 +347,7 @@
Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -371,7 +371,7 @@
makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -392,7 +392,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -413,7 +413,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -436,7 +436,7 @@
V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -461,7 +461,7 @@
kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -483,7 +483,7 @@
V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -504,7 +504,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -525,7 +525,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -548,7 +548,7 @@
EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -566,7 +566,7 @@
PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
diff --git a/neuralnetworks/1.3/utils/Android.bp b/neuralnetworks/1.3/utils/Android.bp
index 7acb4fc..c512dda 100644
--- a/neuralnetworks/1.3/utils/Android.bp
+++ b/neuralnetworks/1.3/utils/Android.bp
@@ -31,21 +31,16 @@
export_include_dirs: ["include"],
cflags: ["-Wthread-safety"],
static_libs: [
- "neuralnetworks_types",
- "neuralnetworks_utils_hal_common",
- "neuralnetworks_utils_hal_1_0",
- "neuralnetworks_utils_hal_1_1",
- "neuralnetworks_utils_hal_1_2",
- ],
- shared_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"android.hardware.neuralnetworks@1.3",
"libfmq",
- ],
- export_static_lib_headers: [
+ "neuralnetworks_types",
"neuralnetworks_utils_hal_common",
+ "neuralnetworks_utils_hal_1_0",
+ "neuralnetworks_utils_hal_1_1",
+ "neuralnetworks_utils_hal_1_2",
],
target: {
host: {
@@ -69,7 +64,6 @@
"android.hardware.neuralnetworks@1.2",
"android.hardware.neuralnetworks@1.3",
"libgmock",
- "libneuralnetworks_common",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
"neuralnetworks_utils_hal_1_0",
@@ -78,13 +72,10 @@
"neuralnetworks_utils_hal_1_3",
],
shared_libs: [
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
"libbase",
"libcutils",
"libfmq",
"libhidlbase",
- "libhidlmemory",
"liblog",
"libutils",
],
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
index c3c6fc4..cf5e5ea0 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -66,8 +66,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index 480438d..124cc43 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -48,18 +48,23 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index a1d414c..09e9d80 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -396,7 +396,7 @@
}
nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> unvalidatedConvert(
- const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+ const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
return V1_2::utils::unvalidatedConvert(extensionNameAndPrefix);
}
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index 9517fda..824cec6 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -187,7 +187,9 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index ce977e5..b92f877 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -135,8 +135,9 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -174,10 +175,13 @@
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
- nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const {
+PreparedModel::executeFenced(
+ const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+ nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -230,7 +234,9 @@
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
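Unlike the 1.0 and 1.2 shims, the 1.3 PreparedModel does implement executeFenced; the new hints and extensionNameToPrefix parameters are still ignored because the 1.3 HIDL interface has no field for them. A hedged sketch of a fenced call against this shim, with preparedModel and request assumed as before:

    // Sketch only: fenced execution against a 1.3 driver; the trailing hint arguments are
    // accepted by the interface but dropped by this HIDL shim.
    const std::vector<nn::SyncFence> waitFor = {nn::SyncFence::createAsSignaled()};
    const auto fencedResult = preparedModel->executeFenced(
            request, waitFor, nn::MeasureTiming::NO, /*deadline=*/{}, /*loopTimeoutDuration=*/{},
            /*timeoutDurationAfterFence=*/{}, /*hints=*/{}, /*extensionNameToPrefix=*/{});
    if (fencedResult.has_value()) {
        const auto& [syncFence, executeFencedInfoCallback] = fencedResult.value();
        // Wait on syncFence, then query timing through executeFencedInfoCallback if needed.
    }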
diff --git a/neuralnetworks/1.3/utils/test/DeviceTest.cpp b/neuralnetworks/1.3/utils/test/DeviceTest.cpp
index 7eba4bc..6f48837 100644
--- a/neuralnetworks/1.3/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.3/utils/test/DeviceTest.cpp
@@ -658,7 +658,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -677,7 +677,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -695,7 +695,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -713,7 +713,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -730,7 +730,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -747,7 +747,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -768,7 +768,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
index 6dbbd6b..51b5d29 100644
--- a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
@@ -182,7 +182,7 @@
.WillOnce(Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
EXPECT_TRUE(result.has_value())
@@ -200,7 +200,7 @@
makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -217,7 +217,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -234,7 +234,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -252,7 +252,7 @@
V1_3::ErrorStatus::NONE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
EXPECT_TRUE(result.has_value())
@@ -271,7 +271,7 @@
kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -289,7 +289,7 @@
V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -306,7 +306,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -323,7 +323,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -344,7 +344,7 @@
.WillOnce(InvokeWithoutArgs(ret));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -366,7 +366,7 @@
.WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -396,7 +396,7 @@
.WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -422,7 +422,7 @@
makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -439,7 +439,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -456,7 +456,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -475,7 +475,7 @@
Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -499,7 +499,7 @@
makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -520,7 +520,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -541,7 +541,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -564,7 +564,7 @@
V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -589,7 +589,7 @@
kNoTiming)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -611,7 +611,7 @@
V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -628,7 +628,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -649,7 +649,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -674,7 +674,7 @@
.WillOnce(InvokeWithoutArgs(ret));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -702,7 +702,7 @@
Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -738,7 +738,7 @@
.WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -768,7 +768,7 @@
makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -789,7 +789,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -810,7 +810,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/ExecutionConfig.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/ExecutionConfig.aidl
new file mode 100644
index 0000000..cb85743
--- /dev/null
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/ExecutionConfig.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.neuralnetworks;
+@VintfStability
+parcelable ExecutionConfig {
+ boolean measureTiming;
+ long loopTimeoutDurationNs;
+ android.hardware.neuralnetworks.TokenValuePair[] executionHints;
+ android.hardware.neuralnetworks.ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl
index eb3d0b0..461fdfa 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl
@@ -36,4 +36,5 @@
interface IBurst {
android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in long[] memoryIdentifierTokens, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
void releaseMemoryResource(in long memoryIdentifierToken);
+ android.hardware.neuralnetworks.ExecutionResult executeSynchronouslyWithConfig(in android.hardware.neuralnetworks.Request request, in long[] memoryIdentifierTokens, in android.hardware.neuralnetworks.ExecutionConfig config, in long deadlineNs);
}
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl
index c9c67f2..c0fba47 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl
@@ -43,6 +43,7 @@
String getVersionString();
void prepareModel(in android.hardware.neuralnetworks.Model model, in android.hardware.neuralnetworks.ExecutionPreference preference, in android.hardware.neuralnetworks.Priority priority, in long deadlineNs, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
void prepareModelFromCache(in long deadlineNs, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
+ void prepareModelWithConfig(in android.hardware.neuralnetworks.Model model, in android.hardware.neuralnetworks.PrepareModelConfig config, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
const int BYTE_SIZE_OF_CACHE_TOKEN = 32;
const int MAX_NUMBER_OF_CACHE_FILES = 32;
const int EXTENSION_TYPE_HIGH_BITS_PREFIX = 15;
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl
index f899567..fb0c372 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl
@@ -37,7 +37,9 @@
android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
android.hardware.neuralnetworks.FencedExecutionResult executeFenced(in android.hardware.neuralnetworks.Request request, in ParcelFileDescriptor[] waitFor, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs, in long durationNs);
android.hardware.neuralnetworks.IBurst configureExecutionBurst();
- android.hardware.neuralnetworks.IExecution createReusableExecution(in android.hardware.neuralnetworks.Request request, in boolean measureTiming, in long loopTimeoutDurationNs);
+ android.hardware.neuralnetworks.IExecution createReusableExecution(in android.hardware.neuralnetworks.Request request, in android.hardware.neuralnetworks.ExecutionConfig config);
+ android.hardware.neuralnetworks.ExecutionResult executeSynchronouslyWithConfig(in android.hardware.neuralnetworks.Request request, in android.hardware.neuralnetworks.ExecutionConfig config, in long deadlineNs);
+ android.hardware.neuralnetworks.FencedExecutionResult executeFencedWithConfig(in android.hardware.neuralnetworks.Request request, in ParcelFileDescriptor[] waitFor, in android.hardware.neuralnetworks.ExecutionConfig config, in long deadlineNs, in long durationNs);
const long DEFAULT_LOOP_TIMEOUT_DURATION_NS = 2000000000;
const long MAXIMUM_LOOP_TIMEOUT_DURATION_NS = 15000000000;
}
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/PrepareModelConfig.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/PrepareModelConfig.aidl
new file mode 100644
index 0000000..85c924f
--- /dev/null
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/PrepareModelConfig.aidl
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.neuralnetworks;
+@VintfStability
+parcelable PrepareModelConfig {
+ android.hardware.neuralnetworks.ExecutionPreference preference;
+ android.hardware.neuralnetworks.Priority priority;
+ long deadlineNs;
+ ParcelFileDescriptor[] modelCache;
+ ParcelFileDescriptor[] dataCache;
+ byte[] cacheToken;
+ android.hardware.neuralnetworks.TokenValuePair[] compilationHints;
+ android.hardware.neuralnetworks.ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/TokenValuePair.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/TokenValuePair.aidl
new file mode 100644
index 0000000..e477d6e
--- /dev/null
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/TokenValuePair.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+// the interface (from the latest frozen version), the build system will
+// prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.neuralnetworks;
+@VintfStability
+parcelable TokenValuePair {
+ int token;
+ byte[] value;
+}
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/ExecutionConfig.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExecutionConfig.aidl
new file mode 100644
index 0000000..00f1e11
--- /dev/null
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExecutionConfig.aidl
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks;
+
+import android.hardware.neuralnetworks.ExtensionNameAndPrefix;
+import android.hardware.neuralnetworks.TokenValuePair;
+
+/**
+ * A type that is used to represent all configuration related to
+ * an Execution.
+ */
+@VintfStability
+parcelable ExecutionConfig {
+ /**
+ * Specifies whether or not to measure duration of the execution.
+ * For {@link IPreparedModel::executeSynchronouslyWithConfig}, the duration runs from the time
+ * the driver sees the corresponding call to the execute function to the time the driver returns
+ * from the function. For {@link IPreparedModel::executeFencedWithConfig}, please refer to
+ * {@link IPreparedModelCallback} for details.
+ */
+ boolean measureTiming;
+ /**
+ * The maximum amount of time in nanoseconds that should be spent
+ * executing a {@link OperationType::WHILE} operation. If a loop
+ * condition model does not output false within this duration,
+ * the execution must be aborted. If -1 is provided, the maximum
+ * amount of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}.
+ * Other negative values are invalid. When provided, the duration
+ * must not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
+ */
+ long loopTimeoutDurationNs;
+ /**
+ * A vector of token / value pairs that represent vendor specific
+ * execution hints or metadata. The provided TokenValuePairs must not
+ * contain the same token twice. The driver must validate the
+ * data and ignore invalid hints. It is up to the driver to
+ * decide whether to respect the provided hints or not.
+ */
+ TokenValuePair[] executionHints;
+ /**
+ * The mapping between extension names and prefixes of token values.
+ * The driver must ignore the corresponding execution hint if
+ * the extension is not supported.
+ */
+ ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
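For illustration, a minimal sketch of populating the new ExecutionConfig parcelable, assuming the C++ types generated for this interface by the AIDL NDK backend; the helper name is hypothetical and no vendor hints are supplied:

#include <aidl/android/hardware/neuralnetworks/ExecutionConfig.h>

namespace nnhal = aidl::android::hardware::neuralnetworks;

// Sketch under the assumptions above: timing enabled, default loop timeout, no vendor hints.
nnhal::ExecutionConfig makeDefaultExecutionConfig() {
    nnhal::ExecutionConfig config;
    config.measureTiming = true;        // ask the driver to report execution timing
    config.loopTimeoutDurationNs = -1;  // -1 selects DEFAULT_LOOP_TIMEOUT_DURATION_NS
    config.executionHints = {};         // no vendor-specific hints in this sketch
    config.extensionNameToPrefix = {};  // nothing to map when no hints are given
    return config;
}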
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl
index 20109bd..9f70a53 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl
@@ -20,6 +20,10 @@
/**
* Information about an extension.
+ *
+ * The extension can provide zero or more operation types (which are not enumerated), zero or more
+ * operand types (which are enumerated in {@link Extension::operandTypes}), and compilation and
+ * execution hints (which are not enumerated).
*/
@VintfStability
parcelable Extension {
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl
index 29be93f..6c296e0 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl
@@ -17,7 +17,8 @@
package android.hardware.neuralnetworks;
/**
- * The mapping between extension names and prefixes of operand and operation type values.
+ * The mapping between extension names and prefixes of values such as operand and operation
+ * types, and tokens in {@link TokenValuePair}.
*
* An operand or operation whose numeric type value is above {@link IDevice::OPERAND_TYPE_BASE_MAX}
* or {@link IDevice::OPERATION_TYPE_BASE_MAX} respectively should be interpreted as an extension
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl
index b089c49..a05a7fb 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl
@@ -17,6 +17,7 @@
package android.hardware.neuralnetworks;
import android.hardware.neuralnetworks.ErrorStatus;
+import android.hardware.neuralnetworks.ExecutionConfig;
import android.hardware.neuralnetworks.ExecutionResult;
import android.hardware.neuralnetworks.Request;
@@ -68,6 +69,8 @@
*
* Only a single execution on a given burst object may be active at any time.
*
+ * Also see {@link IBurst::executeSynchronouslyWithConfig}.
+ *
* @param request The input and output information on which the prepared model is to be
* executed.
* @param memoryIdentifierTokens A list of tokens where each token is a non-negative number
@@ -117,4 +120,13 @@
* - INVALID_ARGUMENT if one of the input arguments is invalid
*/
void releaseMemoryResource(in long memoryIdentifierToken);
+
+ /**
+ * For detailed specification, please refer to {@link IBurst::executeSynchronously}. The
+ * difference between the two methods is that executeSynchronouslyWithConfig takes {@link
+ * ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
+ * more configuration parameters than are passed to executeSynchronously.
+ */
+ ExecutionResult executeSynchronouslyWithConfig(in Request request,
+ in long[] memoryIdentifierTokens, in ExecutionConfig config, in long deadlineNs);
}
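For illustration, a hedged sketch of driving the new burst entry point through the generated NDK proxy; `burst`, `request`, and `config` are assumed to exist (for example, `config` built as in the earlier ExecutionConfig sketch), and the single -1 memory identifier token marks a request pool with no cached identifier:

namespace nnhal = aidl::android::hardware::neuralnetworks;

// Assumes 'burst' is a std::shared_ptr<nnhal::IBurst> and 'request' a populated nnhal::Request.
nnhal::ExecutionResult result;
const ndk::ScopedAStatus status = burst->executeSynchronouslyWithConfig(
        request, /*memoryIdentifierTokens=*/{-1}, config, /*deadlineNs=*/-1, &result);
// On success, 'result' carries output shapes and, if requested, timing, as with
// executeSynchronously.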
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl
index 72e2623..821b9fe 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl
@@ -28,6 +28,7 @@
import android.hardware.neuralnetworks.IPreparedModelParcel;
import android.hardware.neuralnetworks.Model;
import android.hardware.neuralnetworks.NumberOfCacheFiles;
+import android.hardware.neuralnetworks.PrepareModelConfig;
import android.hardware.neuralnetworks.Priority;
/**
@@ -148,7 +149,7 @@
*
* If the device reports that caching is not supported, the user may avoid calling
* IDevice::prepareModelFromCache or providing cache file descriptors to
- * IDevice::prepareModel.
+ * IDevice::prepareModel or IDevice::prepareModelWithConfig.
*
* @return NumberOfCacheFiles structure indicating how many files for model and data cache the
* driver needs to cache a single prepared model. It must be less than or equal to
@@ -302,6 +303,8 @@
*
* Multiple threads may call prepareModel on the same model concurrently.
*
+ * Also see {@link IDevice::prepareModelWithConfig}.
+ *
* @param model The model to be prepared for execution.
* @param preference Indicates the intended execution behavior of a prepared model.
* @param priority The priority of the prepared model relative to other prepared models owned by
@@ -403,17 +406,17 @@
* @param modelCache A vector of file descriptors for the security-sensitive cache. The length
* of the vector must match the numModelCache returned from
* getNumberOfCacheFilesNeeded. The cache file descriptors will be provided in
- * the same order as with prepareModel.
+ * the same order as with prepareModel or prepareModelWithConfig.
* @param dataCache A vector of file descriptors for the constants' cache. The length of the
* vector must match the numDataCache returned from
* getNumberOfCacheFilesNeeded. The cache file descriptors will be provided in
- * the same order as with prepareModel.
+ * the same order as with prepareModel or prepareModelWithConfig.
* @param token A caching token of length BYTE_SIZE_OF_CACHE_TOKEN identifying the prepared
* model. It is the same token provided when saving the cache files with
- * prepareModel. Tokens should be chosen to have a low rate of collision for a
- * particular application. The driver cannot detect a collision; a collision will
- * result in a failed execution or in a successful execution that produces
- * incorrect output values.
+ * prepareModel or prepareModelWithConfig. Tokens should be chosen to have a low
+ * rate of collision for a particular application. The driver cannot detect a
+ * collision; a collision will result in a failed execution or in a successful
+ * execution that produces incorrect output values.
* @param callback A callback object used to return the error status of preparing the model for
* execution and the prepared model if successful, nullptr otherwise. The
* callback object's notify function must be called exactly once, even if the
@@ -429,4 +432,28 @@
void prepareModelFromCache(in long deadlineNs, in ParcelFileDescriptor[] modelCache,
in ParcelFileDescriptor[] dataCache, in byte[] token,
in IPreparedModelCallback callback);
+
+ /**
+ * For detailed specification, please refer to {@link IDevice::prepareModel}. The only
+ * difference between the two methods is that prepareModelWithConfig takes {@link
+ * PrepareModelConfig} instead of standalone configuration parameters, which allows vendor
+ * specific compilation metadata to be passed.
+ *
+ * @param model The model to be prepared for execution.
+ * @param config Configuration parameters to prepare the model.
+ * @param callback A callback object used to return the error status of preparing the model for
+ * execution and the prepared model if successful, nullptr otherwise. The
+ * callback object's notify function must be called exactly once, even if the
+ * model could not be prepared.
+ * @throws ServiceSpecificException with one of the following ErrorStatus values:
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT if one of the input arguments related to preparing the model is
+ * invalid
+ * - MISSED_DEADLINE_* if the preparation is aborted because the model cannot be prepared by
+ * the deadline
+ * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+ */
+ void prepareModelWithConfig(
+ in Model model, in PrepareModelConfig config, in IPreparedModelCallback callback);
}
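For illustration, a hedged sketch of the new compilation path, again assuming the NDK-backend proxies; `device`, `model`, and `callback` (an IPreparedModelCallback implementation) are assumed to already exist, and caching and vendor hints are intentionally left out:

namespace nnhal = aidl::android::hardware::neuralnetworks;

// Assumes 'device' is a std::shared_ptr<nnhal::IDevice>.
nnhal::PrepareModelConfig config;
config.preference = nnhal::ExecutionPreference::SUSTAINED_SPEED;
config.priority = nnhal::Priority::MEDIUM;
config.deadlineNs = -1;                           // no preparation deadline
config.cacheToken = std::vector<uint8_t>(32, 0);  // BYTE_SIZE_OF_CACHE_TOKEN bytes; ignored
                                                  // because modelCache/dataCache stay empty
const ndk::ScopedAStatus status = device->prepareModelWithConfig(model, config, callback);
// The prepared model (or an error) is delivered asynchronously through 'callback',
// exactly as with prepareModel.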
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl
index 79053e5..949804e 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl
@@ -18,6 +18,7 @@
import android.hardware.common.NativeHandle;
import android.hardware.neuralnetworks.ErrorStatus;
+import android.hardware.neuralnetworks.ExecutionConfig;
import android.hardware.neuralnetworks.ExecutionResult;
import android.hardware.neuralnetworks.FencedExecutionResult;
import android.hardware.neuralnetworks.IBurst;
@@ -68,6 +69,8 @@
* Any number of calls to the execute* functions, in any combination, may be made concurrently,
* even on the same IPreparedModel object.
*
+ * Also see {@link IPreparedModel::executeSynchronouslyWithConfig}.
+ *
* @param request The input and output information on which the prepared model is to be
* executed.
* @param measure Specifies whether or not to measure duration of the execution. The duration
@@ -134,6 +137,8 @@
* Any number of calls to the execute* functions, in any combination, may be made concurrently,
* even on the same IPreparedModel object.
*
+ * Also see {@link IPreparedModel::executeFencedWithConfig}.
+ *
* @param request The input and output information on which the prepared model is to be
* executed. The outputs in the request must have fully specified dimensions.
* @param waitFor A vector of sync fence file descriptors. Execution must not start until all
@@ -201,15 +206,7 @@
*
* @param request The input and output information on which the prepared model is to be
* executed.
- * @param measure Specifies whether or not to measure duration of the execution.
- * @param loopTimeoutDurationNs The maximum amount of time in nanoseconds that should be spent
- * executing a {@link OperationType::WHILE} operation. If a loop
- * condition model does not output false within this duration, the
- * computation performed on the returned reusable execution object
- * must be aborted. If -1 is provided, the maximum amount
- * of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
- * negative values are invalid. When provided, the duration must
- * not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
+ * @param config Specifies the execution configuration parameters.
* @return execution An IExecution object representing a reusable execution that has been
* specialized for a fixed request.
* @throws ServiceSpecificException with one of the following ErrorStatus values:
@@ -218,6 +215,64 @@
* - INVALID_ARGUMENT if one of the input arguments is invalid
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
- IExecution createReusableExecution(
- in Request request, in boolean measureTiming, in long loopTimeoutDurationNs);
+ IExecution createReusableExecution(in Request request, in ExecutionConfig config);
+
+ /**
+ * For detailed specification, please refer to {@link IPreparedModel::executeSynchronously}. The
+ * difference between the two methods is that executeSynchronouslyWithConfig takes {@link
+ * ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
+ * more configuration parameters than are passed to executeSynchronously.
+ *
+ * @param request The input and output information on which the prepared model is to be
+ * executed.
+ * @param config Specifies the execution configuration parameters.
+ * @param deadlineNs The time by which the execution is expected to complete. The time is
+ * measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
+ * &ts) or ::android::base::boot_clock). If the execution cannot be finished
+ * by the deadline, the execution may be aborted. Passing -1 means the
+ * deadline is omitted. Other negative values are invalid.
+ * @return ExecutionResult parcelable, containing the status of the execution, output shapes and
+ * timing information.
+ * @throws ServiceSpecificException with one of the following ErrorStatus values:
+ * - MISSED_DEADLINE_* if the execution is aborted because it cannot be completed by the
+ * deadline
+ * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+ */
+ ExecutionResult executeSynchronouslyWithConfig(
+ in Request request, in ExecutionConfig config, in long deadlineNs);
+
+ /**
+ * For detailed specification, please refer to {@link IPreparedModel::executeFenced}. The
+ * difference between the two methods is that executeFencedWithConfig takes {@link
+ * ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
+ * more configuration parameters than are passed to executeFenced.
+ *
+ * @param request The input and output information on which the prepared model is to be
+ * executed. The outputs in the request must have fully specified dimensions.
+ * @param waitFor A vector of sync fence file descriptors. Execution must not start until all
+ * sync fences have been signaled.
+ * @param config Specifies the execution configuration parameters.
+ * @param deadlineNs The time by which the execution is expected to complete. The time is
+ * measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
+ * &ts) or ::android::base::boot_clock). If the execution cannot be finished
+ * by the deadline, the execution may be aborted. Passing -1 means the
+ * deadline is omitted. Other negative values are invalid.
+ * @param durationNs The length of time in nanoseconds within which the execution is expected to
+ * complete after all sync fences in waitFor are signaled. If the execution
+ * cannot be finished within the duration, the execution may be aborted.
+ * Passing -1 means the duration is omitted. Other negative values are
+ * invalid.
+ * @return The FencedExecutionResult parcelable, containing IFencedExecutionCallback and the
+ * sync fence.
+ * @throws ServiceSpecificException with one of the following ErrorStatus values:
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT if one of the input arguments is invalid, including fences in error
+ * states.
+ * - MISSED_DEADLINE_* if the execution is aborted because it cannot be completed by the
+ * deadline
+ * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+ */
+ FencedExecutionResult executeFencedWithConfig(in Request request,
+ in ParcelFileDescriptor[] waitFor, in ExecutionConfig config, in long deadlineNs,
+ in long durationNs);
}
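For illustration, a hedged sketch of the two new prepared-model entry points through the generated NDK proxy; `preparedModel`, `request`, and `config` are assumed to exist already, and both the deadline and the post-fence duration are omitted (-1):

namespace nnhal = aidl::android::hardware::neuralnetworks;

// Synchronous path: same semantics as executeSynchronously, with 'config' carrying the options.
nnhal::ExecutionResult syncResult;
ndk::ScopedAStatus status = preparedModel->executeSynchronouslyWithConfig(
        request, config, /*deadlineNs=*/-1, &syncResult);

// Fenced path: no fences to wait on in this sketch, so execution may start immediately.
nnhal::FencedExecutionResult fencedResult;
status = preparedModel->executeFencedWithConfig(
        request, /*waitFor=*/{}, config, /*deadlineNs=*/-1, /*durationNs=*/-1, &fencedResult);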
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/PrepareModelConfig.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/PrepareModelConfig.aidl
new file mode 100644
index 0000000..96df968
--- /dev/null
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/PrepareModelConfig.aidl
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks;
+
+import android.hardware.neuralnetworks.ExecutionPreference;
+import android.hardware.neuralnetworks.ExtensionNameAndPrefix;
+import android.hardware.neuralnetworks.Priority;
+import android.hardware.neuralnetworks.TokenValuePair;
+
+/**
+ * A type that is used to represent all configuration needed to
+ * prepare a model.
+ */
+@VintfStability
+parcelable PrepareModelConfig {
+ /**
+ * Indicates the intended execution behavior of a prepared model.
+ */
+ ExecutionPreference preference;
+ /**
+ * The priority of the prepared model relative to other prepared
+ * models owned by the client.
+ */
+ Priority priority;
+ /**
+ * The time by which the model is expected to be prepared. The
+ * time is measured in nanoseconds since boot (as from
+ * clock_gettime(CLOCK_BOOTTIME, &ts) or
+ * ::android::base::boot_clock). If the model cannot be prepared
+ * by the deadline, the preparation may be aborted. Passing -1
+ * means the deadline is omitted. Other negative values are
+ * invalid.
+ */
+ long deadlineNs;
+ /**
+ * A vector of file descriptors for the security-sensitive cache.
+ * The length of the vector must either be 0 indicating that
+ * caching information is not provided, or match the
+ * numModelCache returned from IDevice::getNumberOfCacheFilesNeeded. The
+ * cache file descriptors will be provided in the same order when
+ * retrieving the preparedModel from cache files with
+ * IDevice::prepareModelFromCache.
+ */
+ ParcelFileDescriptor[] modelCache;
+ /**
+ * A vector of file descriptors for the constants' cache. The
+ * length of the vector must either be 0 indicating that caching
+ * information is not provided, or match the numDataCache
+ * returned from IDevice::getNumberOfCacheFilesNeeded. The cache file
+ * descriptors will be provided in the same order when retrieving
+ * the preparedModel from cache files with IDevice::prepareModelFromCache.
+ */
+ ParcelFileDescriptor[] dataCache;
+ /**
+ * A caching token of length IDevice::BYTE_SIZE_OF_CACHE_TOKEN identifying
+ * the prepared model. The same token will be provided when
+ * retrieving the prepared model from the cache files with
+ * IDevice::prepareModelFromCache. Tokens should be chosen to have a low
+ * rate of collision for a particular application. The driver
+ * cannot detect a collision; a collision will result in a failed
+ * execution or in a successful execution that produces incorrect
+ * output values. If both modelCache and dataCache are empty
+ * indicating that caching information is not provided, this
+ * token must be ignored.
+ */
+ byte[] cacheToken;
+ /**
+ * A vector of token / value pairs that represent vendor specific
+ * compilation hints or metadata. The provided TokenValuePairs must not
+ * contain the same token twice. The driver must validate the
+ * data and ignore invalid hints. It is up to the driver to
+ * decide whether to respect the provided hints or not.
+ */
+ TokenValuePair[] compilationHints;
+ /**
+ * The mapping between extension names and prefixes of token values.
+ * The driver must ignore the corresponding compilation hint if
+ * the extension is not supported.
+ */
+ ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
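For illustration, a hedged sketch of attaching a vendor-specific compilation hint and tying its token to an extension name through extensionNameToPrefix, as described above; the extension name, prefix, and payload mirror the TokenValuePair example below and are purely illustrative:

namespace nnhal = aidl::android::hardware::neuralnetworks;

nnhal::TokenValuePair hint;
hint.token = (0x7AAA << 16) | 0x000B;  // prefix 0x7AAA, hint 0x000B within that extension
hint.value = {0x01};                   // raw, vendor-defined payload bytes

nnhal::ExtensionNameAndPrefix mapping;
mapping.name = "vendor.test.test_extension";
mapping.prefix = 0x7AAA;

nnhal::PrepareModelConfig config;
config.compilationHints = {hint};
config.extensionNameToPrefix = {mapping};
// The driver must ignore the hint if it does not support vendor.test.test_extension.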
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/TokenValuePair.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/TokenValuePair.aidl
new file mode 100644
index 0000000..ec665b4
--- /dev/null
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/TokenValuePair.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks;
+
+/**
+ * A type that is used to represent a token / byte array data pair.
+ */
+@VintfStability
+parcelable TokenValuePair {
+ /**
+ * A 32-bit integer token. The token is created by combining the
+ * extension prefix and an enum value defined within the extension.
+ * The low {@link IDevice::EXTENSION_TYPE_LOW_BITS_TYPE} bits of the value
+ * correspond to the hint within the extension and the high
+ * {@link IDevice::EXTENSION_TYPE_HIGH_BITS_PREFIX} bits encode the "prefix", which maps
+ * uniquely to the extension name. The sign bit is always 0.
+ *
+ * For example, if a token value is 0x7AAA000B and the corresponding
+ * {@link ExtensionNameAndPrefix} contains an entry with prefix=0x7AAA and
+ * name="vendor.test.test_extension", then the token should be interpreted as the hint
+ * 0x000B of the extension named vendor.test.test_extension.
+ */
+ int token;
+ /**
+ * A byte array containing the raw data.
+ */
+ byte[] value;
+}
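For illustration, a hedged sketch of how the token layout described above composes and decomposes, using the 0x7AAA000B example; it assumes IDevice::EXTENSION_TYPE_LOW_BITS_TYPE is 16, so the 15 prefix bits, 16 type bits, and the zero sign bit fill the 32-bit value:

#include <cstdint>

constexpr int32_t kLowBitsType = 16;  // assumed value of IDevice::EXTENSION_TYPE_LOW_BITS_TYPE

constexpr int32_t makeExtensionToken(int32_t prefix, int32_t hintWithinExtension) {
    return (prefix << kLowBitsType) | hintWithinExtension;
}

// 0x7AAA000B splits into prefix 0x7AAA and hint 0x000B, matching the comment above.
static_assert(makeExtensionToken(0x7AAA, 0x000B) == 0x7AAA000B, "compose token");
static_assert((0x7AAA000B >> kLowBitsType) == 0x7AAA, "recover prefix");
static_assert((0x7AAA000B & ((1 << kLowBitsType) - 1)) == 0x000B, "recover hint");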
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 3faa613..9148eac 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -111,19 +111,13 @@
static_libs: [
"libaidlcommonsupport",
"libgmock",
- "libneuralnetworks_common",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
],
shared_libs: [
- "android.hidl.allocator@1.0",
"libbase",
"libbinder_ndk",
"libcutils",
- "libhidlbase",
- "libhidlmemory",
- "liblog",
- "libutils",
],
target: {
android: {
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
index 0cc78d4..f2e6e75 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
@@ -86,10 +86,12 @@
GUARDED_BY(mMutex);
};
+ // featureLevel is for testing purposes.
static nn::GeneralResult<std::shared_ptr<const Burst>> create(
- std::shared_ptr<aidl_hal::IBurst> burst);
+ std::shared_ptr<aidl_hal::IBurst> burst, nn::Version featureLevel);
- Burst(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IBurst> burst);
+ Burst(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IBurst> burst,
+ nn::Version featureLevel);
// See IBurst::cacheMemory for information.
OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
@@ -97,23 +99,29 @@
// See IBurst::execute for information.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
// See IBurst::createReusableExecution for information.
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
const hal::utils::RequestRelocation& relocation) const;
private:
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
const std::shared_ptr<aidl_hal::IBurst> kBurst;
const std::shared_ptr<MemoryCache> kMemoryCache;
+ const nn::Version kFeatureLevel;
};
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
index 477b311..af58715 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -46,6 +46,10 @@
#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
#include <aidl/android/hardware/neuralnetworks/Timing.h>
+#ifdef NN_AIDL_V4_OR_ABOVE
+#include <aidl/android/hardware/neuralnetworks/TokenValuePair.h>
+#endif // NN_AIDL_V4_OR_ABOVE
+
#include <android/binder_auto_utils.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
@@ -74,7 +78,7 @@
const aidl_hal::SymmPerChannelQuantParams& symmPerChannelQuantParams);
GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation);
GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model);
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix);
GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues);
GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph);
@@ -97,6 +101,10 @@
const aidl_hal::ExtensionOperandTypeInformation& operandTypeInformation);
GeneralResult<SharedHandle> unvalidatedConvert(const ndk::ScopedFileDescriptor& handle);
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<TokenValuePair> unvalidatedConvert(const aidl_hal::TokenValuePair& tokenValuePair);
+#endif // NN_AIDL_V4_OR_ABOVE
+
GeneralResult<std::vector<Operation>> unvalidatedConvert(
const std::vector<aidl_hal::Operation>& operations);
@@ -116,6 +124,14 @@
GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
+GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+ const std::vector<aidl_hal::ExtensionNameAndPrefix>& extensionNameAndPrefix);
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<std::vector<TokenValuePair>> convert(
+ const std::vector<aidl_hal::TokenValuePair>& metaData);
+#endif // NN_AIDL_V4_OR_ABOVE
+
GeneralResult<std::vector<OutputShape>> convert(
const std::vector<aidl_hal::OutputShape>& outputShapes);
GeneralResult<std::vector<SharedHandle>> convert(
@@ -152,7 +168,7 @@
nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(
const nn::Model::OperandValues& operandValues);
nn::GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
- const nn::Model::ExtensionNameAndPrefix& extensionNameToPrefix);
+ const nn::ExtensionNameAndPrefix& extensionNameToPrefix);
nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
nn::GeneralResult<Priority> unvalidatedConvert(const nn::Priority& priority);
nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request);
@@ -166,6 +182,10 @@
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities);
nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<TokenValuePair> unvalidatedConvert(const nn::TokenValuePair& tokenValuePair);
+#endif // NN_AIDL_V4_OR_ABOVE
+
nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
@@ -190,6 +210,13 @@
nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
const std::vector<nn::SyncFence>& syncFences);
nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions);
+nn::GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix);
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<std::vector<TokenValuePair>> convert(
+ const std::vector<nn::TokenValuePair>& metaData);
+#endif // NN_AIDL_V4_OR_ABOVE
nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
index d558f66..615c6de 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
@@ -42,6 +42,7 @@
struct PrivateConstructorTag {};
public:
+ // featureLevel is for testing purposes.
static nn::GeneralResult<std::shared_ptr<const Device>> create(
std::string name, std::shared_ptr<aidl_hal::IDevice> device, nn::Version featureLevel);
@@ -67,8 +68,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
index 205d428..cacdc26 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
@@ -63,7 +63,9 @@
#ifdef NN_AIDL_V4_OR_ABOVE
#include <aidl/android/hardware/neuralnetworks/BnExecution.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionConfig.h>
#include <aidl/android/hardware/neuralnetworks/IExecution.h>
+#include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>
#endif // NN_AIDL_V4_OR_ABOVE
namespace android::nn {
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h
index e66507a..9375c1d 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h
@@ -53,6 +53,9 @@
const std::vector<ndk::ScopedFileDescriptor>& dataCache,
const std::vector<uint8_t>& token,
const std::shared_ptr<IPreparedModelCallback>& callback) override;
+ ndk::ScopedAStatus prepareModelWithConfig(
+ const Model& model, const PrepareModelConfig& config,
+ const std::shared_ptr<IPreparedModelCallback>& callback) override;
ndk::ScopedAStatus prepareModelFromCache(
int64_t deadline, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
const std::vector<ndk::ScopedFileDescriptor>& dataCache,
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index 24cd681..cb6a85b 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -40,6 +40,7 @@
struct PrivateConstructorTag {};
public:
+ // featureLevel is for testing purposes.
static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel);
@@ -49,18 +50,23 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
@@ -68,6 +74,8 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
const Request& request, bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
const hal::utils::RequestRelocation& relocation) const;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -75,6 +83,8 @@
const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
int64_t deadline, int64_t loopTimeoutDuration,
int64_t timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
const hal::utils::RequestRelocation& relocation) const;
private:
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index fb00b26..6c7aa88 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -43,12 +43,16 @@
static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
std::shared_ptr<const Burst> burst, Request request,
std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
hal::utils::RequestRelocation relocation,
std::vector<Burst::OptionalCacheHold> cacheHolds);
BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> burst, Request request,
std::vector<int64_t> memoryIdentifierTokens, bool measure,
- int64_t loopTimeoutDuration, hal::utils::RequestRelocation relocation,
+ int64_t loopTimeoutDuration, const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
+ hal::utils::RequestRelocation relocation,
std::vector<Burst::OptionalCacheHold> cacheHolds);
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
@@ -64,6 +68,8 @@
const std::vector<int64_t> kMemoryIdentifierTokens;
const bool kMeasure;
const int64_t kLoopTimeoutDuration;
+ const std::vector<nn::TokenValuePair> kHints;
+ const std::vector<nn::ExtensionNameAndPrefix> kExtensionNameToPrefix;
const hal::utils::RequestRelocation kRelocation;
const std::vector<Burst::OptionalCacheHold> kCacheHolds;
};
@@ -149,17 +155,20 @@
}
nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
- std::shared_ptr<aidl_hal::IBurst> burst) {
+ std::shared_ptr<aidl_hal::IBurst> burst, nn::Version featureLevel) {
if (burst == nullptr) {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
<< "aidl_hal::utils::Burst::create must have non-null burst";
}
- return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(burst));
+ return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(burst), featureLevel);
}
-Burst::Burst(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IBurst> burst)
- : kBurst(std::move(burst)), kMemoryCache(std::make_shared<MemoryCache>(kBurst)) {
+Burst::Burst(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IBurst> burst,
+ nn::Version featureLevel)
+ : kBurst(std::move(burst)),
+ kMemoryCache(std::make_shared<MemoryCache>(kBurst)),
+ kFeatureLevel(featureLevel) {
CHECK(kBurst != nullptr);
}
@@ -170,8 +179,9 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -200,14 +210,14 @@
memoryIdentifierTokens.push_back(-1);
}
CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
-
return executeInternal(aidlRequest, memoryIdentifierTokens, aidlMeasure, aidlDeadline,
- aidlLoopTimeoutDuration, relocation);
+ aidlLoopTimeoutDuration, hints, extensionNameToPrefix, relocation);
}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, bool measure,
- int64_t deadline, int64_t loopTimeoutDuration,
+ int64_t deadline, int64_t loopTimeoutDuration, const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
const hal::utils::RequestRelocation& relocation) const {
// Ensure that at most one execution is in flight at any given time.
const bool alreadyInFlight = mExecutionInFlight.test_and_set();
@@ -221,9 +231,21 @@
}
ExecutionResult executionResult;
- const auto ret = kBurst->executeSynchronously(request, memoryIdentifierTokens, measure,
- deadline, loopTimeoutDuration, &executionResult);
- HANDLE_ASTATUS(ret) << "execute failed";
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+ const auto ret = kBurst->executeSynchronouslyWithConfig(
+ request, memoryIdentifierTokens,
+ {measure, loopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ deadline, &executionResult);
+ HANDLE_ASTATUS(ret) << "execute failed";
+ } else {
+ const auto ret =
+ kBurst->executeSynchronously(request, memoryIdentifierTokens, measure, deadline,
+ loopTimeoutDuration, &executionResult);
+ HANDLE_ASTATUS(ret) << "execute failed";
+ }
if (!executionResult.outputSufficientSize) {
auto canonicalOutputShapes =
nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
@@ -241,7 +263,9 @@
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -272,12 +296,15 @@
return BurstExecution::create(shared_from_this(), std::move(aidlRequest),
std::move(memoryIdentifierTokens), aidlMeasure,
- aidlLoopTimeoutDuration, std::move(relocation), std::move(holds));
+ aidlLoopTimeoutDuration, hints, extensionNameToPrefix,
+ std::move(relocation), std::move(holds));
}
nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
std::shared_ptr<const Burst> burst, Request request,
std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
hal::utils::RequestRelocation relocation,
std::vector<Burst::OptionalCacheHold> cacheHolds) {
if (burst == nullptr) {
@@ -286,13 +313,15 @@
return std::make_shared<const BurstExecution>(
PrivateConstructorTag{}, std::move(burst), std::move(request),
- std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, std::move(relocation),
- std::move(cacheHolds));
+ std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix, std::move(relocation), std::move(cacheHolds));
}
BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<const Burst> burst,
Request request, std::vector<int64_t> memoryIdentifierTokens,
bool measure, int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
hal::utils::RequestRelocation relocation,
std::vector<Burst::OptionalCacheHold> cacheHolds)
: kBurst(std::move(burst)),
@@ -300,6 +329,8 @@
kMemoryIdentifierTokens(std::move(memoryIdentifierTokens)),
kMeasure(measure),
kLoopTimeoutDuration(loopTimeoutDuration),
+ kHints(hints),
+ kExtensionNameToPrefix(extensionNameToPrefix),
kRelocation(std::move(relocation)),
kCacheHolds(std::move(cacheHolds)) {}
@@ -307,7 +338,8 @@
const nn::OptionalTimePoint& deadline) const {
const auto aidlDeadline = NN_TRY(convert(deadline));
return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
- kLoopTimeoutDuration, kRelocation);
+ kLoopTimeoutDuration, kHints, kExtensionNameToPrefix,
+ kRelocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
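The Burst changes above settle into the dispatch pattern that recurs throughout this change: the feature level recorded at creation time decides whether the new *WithConfig entry point (which carries the hints and extension prefixes) or the pre-existing call is used, so older services never receive arguments they cannot parse. A minimal, self-contained sketch of that pattern, using simplified hypothetical types rather than the real AOSP classes:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Simplified stand-ins for the canonical NNAPI types (hypothetical).
    enum class FeatureLevel : int { LEVEL_7 = 7, LEVEL_8 = 8 };
    struct TokenValuePair { int32_t token; std::vector<uint8_t> value; };

    struct FakeHalBurst {
        // Pre-FEATURE_LEVEL_8 entry point.
        void executeSynchronously() { std::cout << "legacy execute\n"; }
        // FEATURE_LEVEL_8 entry point that also carries the execution config.
        void executeSynchronouslyWithConfig(const std::vector<TokenValuePair>& hints) {
            std::cout << "execute with " << hints.size() << " hint(s)\n";
        }
    };

    // Dispatch on the feature level recorded when the burst wrapper was created.
    void execute(FakeHalBurst& burst, FeatureLevel level,
                 const std::vector<TokenValuePair>& hints) {
        if (level >= FeatureLevel::LEVEL_8) {
            burst.executeSynchronouslyWithConfig(hints);
        } else {
            burst.executeSynchronously();  // hints are not forwarded to old services
        }
    }

    int main() {
        FakeHalBurst burst;
        execute(burst, FeatureLevel::LEVEL_7, {});                    // legacy path
        execute(burst, FeatureLevel::LEVEL_8, {{/*token=*/0, {1}}});  // config path
    }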
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 113d2da..eb28db7 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -302,9 +302,9 @@
};
}
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix) {
- return Model::ExtensionNameAndPrefix{
+ return ExtensionNameAndPrefix{
.name = extensionNameAndPrefix.name,
.prefix = extensionNameAndPrefix.prefix,
};
@@ -506,6 +506,12 @@
return std::make_shared<const Handle>(std::move(duplicatedFd));
}
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<TokenValuePair> unvalidatedConvert(const aidl_hal::TokenValuePair& tokenValuePair) {
+ return TokenValuePair{.token = tokenValuePair.token, .value = tokenValuePair.value};
+}
+#endif // NN_AIDL_V4_OR_ABOVE
+
GeneralResult<Capabilities> convert(const aidl_hal::Capabilities& capabilities) {
return validatedConvert(capabilities);
}
@@ -562,6 +568,17 @@
GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories) {
return validatedConvert(memories);
}
+GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+ const std::vector<aidl_hal::ExtensionNameAndPrefix>& extensionNameAndPrefix) {
+ return unvalidatedConvert(extensionNameAndPrefix);
+}
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<std::vector<TokenValuePair>> convert(
+ const std::vector<aidl_hal::TokenValuePair>& metaData) {
+ return validatedConvert(metaData);
+}
+#endif // NN_AIDL_V4_OR_ABOVE
GeneralResult<std::vector<OutputShape>> convert(
const std::vector<aidl_hal::OutputShape>& outputShapes) {
@@ -942,7 +959,7 @@
}
nn::GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
- const nn::Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
+ const nn::ExtensionNameAndPrefix& extensionNameToPrefix) {
return ExtensionNameAndPrefix{
.name = extensionNameToPrefix.name,
.prefix = extensionNameToPrefix.prefix,
@@ -1055,6 +1072,11 @@
return Extension{.name = extension.name,
.operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))};
}
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<TokenValuePair> unvalidatedConvert(const nn::TokenValuePair& tokenValuePair) {
+ return TokenValuePair{.token = tokenValuePair.token, .value = tokenValuePair.value};
+}
+#endif // NN_AIDL_V4_OR_ABOVE
nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
return validatedConvert(cacheToken);
@@ -1134,6 +1156,17 @@
const std::vector<nn::SyncFence>& syncFences) {
return validatedConvert(syncFences);
}
+nn::GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) {
+ return unvalidatedConvert(extensionNameToPrefix);
+}
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<std::vector<TokenValuePair>> convert(
+ const std::vector<nn::TokenValuePair>& metaData) {
+ return validatedConvert(metaData);
+}
+#endif // NN_AIDL_V4_OR_ABOVE
nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions) {
return validatedConvert(extensions);
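The new vector overloads added to Conversions.cpp are thin wrappers: they map the per-element unvalidatedConvert/validatedConvert helpers over the input and propagate the first failure. A rough stand-alone sketch of that shape, with a hypothetical optional-based result type in place of GeneralResult:

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the canonical and AIDL extension-prefix records.
    struct CanonicalExtensionNameAndPrefix { std::string name; uint16_t prefix; };
    struct AidlExtensionNameAndPrefix { std::string name; int32_t prefix; };

    // Per-element conversion; nullopt signals failure (stand-in for GeneralResult).
    std::optional<CanonicalExtensionNameAndPrefix> unvalidatedConvert(
            const AidlExtensionNameAndPrefix& in) {
        if (in.prefix < 0) return std::nullopt;  // reject obviously invalid input
        return CanonicalExtensionNameAndPrefix{in.name, static_cast<uint16_t>(in.prefix)};
    }

    // Vector conversion: map the element converter and stop at the first failure.
    std::optional<std::vector<CanonicalExtensionNameAndPrefix>> convert(
            const std::vector<AidlExtensionNameAndPrefix>& in) {
        std::vector<CanonicalExtensionNameAndPrefix> out;
        out.reserve(in.size());
        for (const auto& element : in) {
            auto converted = unvalidatedConvert(element);
            if (!converted) return std::nullopt;
            out.push_back(std::move(*converted));
        }
        return out;
    }

    int main() {
        const auto ok = convert({{"com.example.ext", 1}});
        std::cout << (ok ? "converted " + std::to_string(ok->size()) + " entry"
                         : std::string("failed")) << "\n";
    }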
diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index bad10ed..f3f4fdb 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -215,7 +215,9 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
@@ -225,17 +227,28 @@
const auto aidlPreference = NN_TRY(convert(preference));
const auto aidlPriority = NN_TRY(convert(priority));
const auto aidlDeadline = NN_TRY(convert(deadline));
- const auto aidlModelCache = NN_TRY(convert(modelCache));
- const auto aidlDataCache = NN_TRY(convert(dataCache));
+ auto aidlModelCache = NN_TRY(convert(modelCache));
+ auto aidlDataCache = NN_TRY(convert(dataCache));
const auto aidlToken = NN_TRY(convert(token));
const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
const auto scoped = kDeathHandler.protectCallback(cb.get());
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+ const auto ret = kDevice->prepareModelWithConfig(
+ aidlModel,
+ {aidlPreference, aidlPriority, aidlDeadline, std::move(aidlModelCache),
+ std::move(aidlDataCache), aidlToken, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ cb);
+ HANDLE_ASTATUS(ret) << "prepareModel failed";
+ return cb->get();
+ }
const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
aidlModelCache, aidlDataCache, aidlToken, cb);
HANDLE_ASTATUS(ret) << "prepareModel failed";
-
return cb->get();
}
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index c4add63..2fd88af 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -63,7 +63,7 @@
ExecutionWithCachedRequest::compute(const nn::OptionalTimePoint& deadline) const {
const auto aidlDeadline = NN_TRY(convert(deadline));
return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
- kRelocation);
+ {}, {}, kRelocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -73,9 +73,9 @@
const auto aidlWaitFor = NN_TRY(convert(waitFor));
const auto aidlDeadline = NN_TRY(convert(deadline));
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
- return kPreparedModel->executeFencedInternal(kRequest, aidlWaitFor, kMeasure, aidlDeadline,
- kLoopTimeoutDuration,
- aidlTimeoutDurationAfterFence, kRelocation);
+ return kPreparedModel->executeFencedInternal(
+ kRequest, aidlWaitFor, kMeasure, aidlDeadline, kLoopTimeoutDuration,
+ aidlTimeoutDurationAfterFence, {}, {}, kRelocation);
}
nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
diff --git a/neuralnetworks/aidl/utils/src/InvalidDevice.cpp b/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
index c9d9955..33270ff 100644
--- a/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
+++ b/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
@@ -167,6 +167,31 @@
return ndk::ScopedAStatus::ok();
}
+ndk::ScopedAStatus InvalidDevice::prepareModelWithConfig(
+ const Model& model, const PrepareModelConfig& config,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
+ if (!utils::valid(config.extensionNameToPrefix)) {
+ callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return toAStatus(ErrorStatus::INVALID_ARGUMENT, "Invalid extensionNameToPrefix");
+ }
+ for (const auto& hint : config.compilationHints) {
+ auto result = std::find_if(config.extensionNameToPrefix.begin(),
+ config.extensionNameToPrefix.end(),
+ [&hint](const ExtensionNameAndPrefix& extension) {
+ uint16_t prefix = static_cast<uint32_t>(hint.token) >>
+ IDevice::EXTENSION_TYPE_LOW_BITS_TYPE;
+ return prefix == extension.prefix;
+ });
+ if (result == config.extensionNameToPrefix.end()) {
+ callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return toAStatus(ErrorStatus::INVALID_ARGUMENT,
+ "Invalid token for compilation hints: " + std::to_string(hint.token));
+ }
+ }
+ return prepareModel(model, config.preference, config.priority, config.deadlineNs,
+ config.modelCache, config.dataCache, config.cacheToken, callback);
+}
+
ndk::ScopedAStatus InvalidDevice::prepareModelFromCache(
int64_t /*deadline*/, const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/,
const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/,
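InvalidDevice::prepareModelWithConfig above rejects any compilation hint whose token does not resolve to a declared extension prefix; the prefix is recovered from the high bits of the token, with the low EXTENSION_TYPE_LOW_BITS_TYPE bits reserved for the type within the extension. A small stand-alone sketch of that check, assuming a 16-bit low-bits split (the constant value here is illustrative, not taken from the AIDL interface):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Assumed split: high bits of a hint token are the extension prefix,
    // the low 16 bits are the type within that extension.
    constexpr uint32_t kExtensionTypeLowBits = 16;

    struct ExtensionNameAndPrefix { const char* name; uint16_t prefix; };

    bool hintTokenIsValid(int32_t token,
                          const std::vector<ExtensionNameAndPrefix>& extensions) {
        const uint16_t prefix = static_cast<uint32_t>(token) >> kExtensionTypeLowBits;
        return std::any_of(extensions.begin(), extensions.end(),
                           [prefix](const ExtensionNameAndPrefix& e) {
                               return e.prefix == prefix;
                           });
    }

    int main() {
        const std::vector<ExtensionNameAndPrefix> extensions = {{"com.example.ext", 1}};
        const int32_t goodToken = (1 << kExtensionTypeLowBits) | 7;  // prefix 1, type 7
        const int32_t badToken = (2 << kExtensionTypeLowBits) | 7;   // prefix 2: undeclared
        std::cout << hintTokenIsValid(goodToken, extensions) << " "
                  << hintTokenIsValid(badToken, extensions) << "\n";  // prints "1 0"
    }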
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 6d1de56..7e3a31c 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -128,8 +128,9 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -141,30 +142,46 @@
const auto aidlMeasure = NN_TRY(convert(measure));
const auto aidlDeadline = NN_TRY(convert(deadline));
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
- return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
- relocation);
+ return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, hints,
+ extensionNameToPrefix, relocation);
}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,
int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
const hal::utils::RequestRelocation& relocation) const {
if (relocation.input) {
relocation.input->flush();
}
ExecutionResult executionResult;
- const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
- loopTimeoutDuration, &executionResult);
- HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+ const auto ret = kPreparedModel->executeSynchronouslyWithConfig(
+ request,
+ {measure, loopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ deadline, &executionResult);
+ HANDLE_ASTATUS(ret) << "executeSynchronouslyWithConfig failed";
+ } else {
+ const auto ret = kPreparedModel->executeSynchronously(
+ request, measure, deadline, loopTimeoutDuration, &executionResult);
+ HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+ }
return handleExecutionResult(executionResult, relocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
- nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const {
+PreparedModel::executeFenced(
+ const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+ nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -179,31 +196,45 @@
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
return executeFencedInternal(aidlRequest, aidlWaitFor, aidlMeasure, aidlDeadline,
- aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence,
- relocation);
+ aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence, hints,
+ extensionNameToPrefix, relocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFencedInternal(const Request& request,
- const std::vector<ndk::ScopedFileDescriptor>& waitFor,
- bool measure, int64_t deadline, int64_t loopTimeoutDuration,
- int64_t timeoutDurationAfterFence,
- const hal::utils::RequestRelocation& relocation) const {
+PreparedModel::executeFencedInternal(
+ const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
+ int64_t deadline, int64_t loopTimeoutDuration, int64_t timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
+ const hal::utils::RequestRelocation& relocation) const {
if (relocation.input) {
relocation.input->flush();
}
FencedExecutionResult result;
- const auto ret =
- kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
- timeoutDurationAfterFence, &result);
- HANDLE_ASTATUS(ret) << "executeFenced failed";
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+ const auto ret = kPreparedModel->executeFencedWithConfig(
+ request, waitFor,
+ {measure, loopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ deadline, timeoutDurationAfterFence, &result);
+ HANDLE_ASTATUS(ret) << "executeFencedWithConfig failed";
+ } else {
+ const auto ret = kPreparedModel->executeFenced(request, waitFor, measure, deadline,
+ loopTimeoutDuration,
+ timeoutDurationAfterFence, &result);
+ HANDLE_ASTATUS(ret) << "executeFenced failed";
+ }
return handleFencedExecutionResult(result, relocation);
}
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -217,8 +248,14 @@
if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
std::shared_ptr<IExecution> execution;
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+
const auto ret = kPreparedModel->createReusableExecution(
- aidlRequest, aidlMeasure, aidlLoopTimeoutDuration, &execution);
+ aidlRequest,
+ {aidlMeasure, aidlLoopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ &execution);
HANDLE_ASTATUS(ret) << "createReusableExecution failed";
return Execution::create(std::move(execution), std::move(relocation));
}
@@ -232,7 +269,7 @@
std::shared_ptr<IBurst> burst;
const auto ret = kPreparedModel->configureExecutionBurst(&burst);
HANDLE_ASTATUS(ret) << "configureExecutionBurst failed";
- return Burst::create(std::move(burst));
+ return Burst::create(std::move(burst), kFeatureLevel);
}
std::any PreparedModel::getUnderlyingResource() const {
diff --git a/neuralnetworks/aidl/utils/test/DeviceTest.cpp b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
index fb13af8..73727b3 100644
--- a/neuralnetworks/aidl/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
@@ -61,7 +61,6 @@
.powerUsage = std::numeric_limits<float>::max()};
constexpr NumberOfCacheFiles kNumberOfCacheFiles = {.numModelCache = nn::kMaxNumberOfCacheFiles - 1,
.numDataCache = nn::kMaxNumberOfCacheFiles};
-
constexpr auto makeStatusOk = [] { return ndk::ScopedAStatus::ok(); };
std::shared_ptr<MockDevice> createMockDevice() {
@@ -124,6 +123,18 @@
};
}
+const std::vector<nn::TokenValuePair> kHints = {nn::TokenValuePair{.token = 0, .value = {1}}};
+const std::vector<nn::ExtensionNameAndPrefix> kExtensionNameToPrefix = {
+ nn::ExtensionNameAndPrefix{.name = "com.android.nn_test", .prefix = 1}};
+auto makePreparedModelWithConfigReturn(ErrorStatus launchStatus, ErrorStatus returnStatus,
+ const std::shared_ptr<MockPreparedModel>& preparedModel) {
+ return [launchStatus, returnStatus, preparedModel](
+ const Model& /*model*/, const PrepareModelConfig& /*config*/,
+ const std::shared_ptr<IPreparedModelCallback>& cb) -> ndk::ScopedAStatus {
+ return makePreparedModelReturnImpl(launchStatus, returnStatus, preparedModel, cb);
+ };
+}
+
auto makePreparedModelFromCacheReturn(ErrorStatus launchStatus, ErrorStatus returnStatus,
const std::shared_ptr<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
@@ -560,6 +571,8 @@
}
TEST_P(DeviceTest, prepareModel) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup call
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -571,7 +584,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -580,6 +593,8 @@
}
TEST_P(DeviceTest, prepareModelLaunchError) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup call
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -590,7 +605,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -598,6 +613,8 @@
}
TEST_P(DeviceTest, prepareModelReturnError) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup call
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -608,7 +625,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -616,6 +633,8 @@
}
TEST_P(DeviceTest, prepareModelNullptrError) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup call
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -626,7 +645,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -634,6 +653,8 @@
}
TEST_P(DeviceTest, prepareModelTransportFailure) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup call
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -643,7 +664,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -651,6 +672,8 @@
}
TEST_P(DeviceTest, prepareModelDeadObject) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup call
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -660,7 +683,7 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -668,6 +691,8 @@
}
TEST_P(DeviceTest, prepareModelAsyncCrash) {
+ if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
// setup test
const auto mockDevice = createMockDevice();
const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -681,7 +706,157 @@
// run test
const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfig) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makePreparedModelWithConfigReturn(ErrorStatus::NONE, ErrorStatus::NONE,
+ mockPreparedModel)));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ EXPECT_NE(result.value(), nullptr);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigLaunchError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makePreparedModelWithConfigReturn(
+ ErrorStatus::GENERAL_FAILURE, ErrorStatus::GENERAL_FAILURE, nullptr)));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigReturnError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makePreparedModelWithConfigReturn(
+ ErrorStatus::NONE, ErrorStatus::GENERAL_FAILURE, nullptr)));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigNullptrError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makePreparedModelWithConfigReturn(ErrorStatus::NONE, ErrorStatus::NONE,
+ nullptr)));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigTransportFailure) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigDeadObject) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigAsyncCrash) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockDevice = createMockDevice();
+ const auto device = Device::create(kName, mockDevice, kVersion).value();
+ const auto ret = [&device]() {
+ DeathMonitor::serviceDied(device->getDeathMonitor());
+ return ndk::ScopedAStatus::ok();
+ };
+ EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(ret));
+
+ // run test
+ const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+ kExtensionNameToPrefix);
// verify result
ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/aidl/utils/test/MockBuffer.h b/neuralnetworks/aidl/utils/test/MockBuffer.h
index f77fa86..7a05a0f 100644
--- a/neuralnetworks/aidl/utils/test/MockBuffer.h
+++ b/neuralnetworks/aidl/utils/test/MockBuffer.h
@@ -21,7 +21,6 @@
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-#include <hidl/Status.h>
namespace aidl::android::hardware::neuralnetworks::utils {
diff --git a/neuralnetworks/aidl/utils/test/MockBurst.h b/neuralnetworks/aidl/utils/test/MockBurst.h
index 5083bbd..609bd30 100644
--- a/neuralnetworks/aidl/utils/test/MockBurst.h
+++ b/neuralnetworks/aidl/utils/test/MockBurst.h
@@ -21,7 +21,6 @@
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-#include <hidl/Status.h>
namespace aidl::android::hardware::neuralnetworks::utils {
@@ -32,6 +31,10 @@
bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
ExecutionResult* executionResult),
(override));
+ MOCK_METHOD(ndk::ScopedAStatus, executeSynchronouslyWithConfig,
+ (const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+ const ExecutionConfig& config, int64_t deadline, ExecutionResult* executionResult),
+ (override));
MOCK_METHOD(ndk::ScopedAStatus, releaseMemoryResource, (int64_t memoryIdentifierToken),
(override));
};
diff --git a/neuralnetworks/aidl/utils/test/MockDevice.h b/neuralnetworks/aidl/utils/test/MockDevice.h
index 3a28d55..47b8346 100644
--- a/neuralnetworks/aidl/utils/test/MockDevice.h
+++ b/neuralnetworks/aidl/utils/test/MockDevice.h
@@ -50,6 +50,10 @@
const std::vector<uint8_t>& token,
const std::shared_ptr<IPreparedModelCallback>& callback),
(override));
+ MOCK_METHOD(ndk::ScopedAStatus, prepareModelWithConfig,
+ (const Model& model, const PrepareModelConfig& config,
+ const std::shared_ptr<IPreparedModelCallback>& callback),
+ (override));
MOCK_METHOD(ndk::ScopedAStatus, prepareModelFromCache,
(int64_t deadline, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
const std::vector<ndk::ScopedFileDescriptor>& dataCache,
diff --git a/neuralnetworks/aidl/utils/test/MockExecution.h b/neuralnetworks/aidl/utils/test/MockExecution.h
index 216f569..782e54f 100644
--- a/neuralnetworks/aidl/utils/test/MockExecution.h
+++ b/neuralnetworks/aidl/utils/test/MockExecution.h
@@ -21,8 +21,6 @@
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-#include <hidl/HidlSupport.h>
-#include <hidl/Status.h>
namespace aidl::android::hardware::neuralnetworks::utils {
diff --git a/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h b/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h
index 06f9ea2..29449bb 100644
--- a/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h
+++ b/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h
@@ -22,7 +22,6 @@
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-#include <hidl/Status.h>
namespace aidl::android::hardware::neuralnetworks::utils {
diff --git a/neuralnetworks/aidl/utils/test/MockPreparedModel.h b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
index 0ed9af9..a5b3b66 100644
--- a/neuralnetworks/aidl/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
@@ -22,8 +22,6 @@
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-#include <hidl/HidlSupport.h>
-#include <hidl/Status.h>
namespace aidl::android::hardware::neuralnetworks::utils {
@@ -40,10 +38,19 @@
bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
int64_t duration, FencedExecutionResult* fencedExecutionResult),
(override));
+ MOCK_METHOD(ndk::ScopedAStatus, executeSynchronouslyWithConfig,
+ (const Request& request, const ExecutionConfig& config, int64_t deadline,
+ ExecutionResult* executionResult),
+ (override));
+ MOCK_METHOD(ndk::ScopedAStatus, executeFencedWithConfig,
+ (const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ const ExecutionConfig& config, int64_t deadline, int64_t duration,
+ FencedExecutionResult* fencedExecutionResult),
+ (override));
MOCK_METHOD(ndk::ScopedAStatus, configureExecutionBurst, (std::shared_ptr<IBurst> * burst),
(override));
MOCK_METHOD(ndk::ScopedAStatus, createReusableExecution,
- (const Request& request, bool measureTiming, int64_t loopTimeoutDuration,
+ (const Request& request, const ExecutionConfig& config,
std::shared_ptr<IExecution>* execution),
(override));
};
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index 8cfb7c1..bf6136d 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -70,6 +70,21 @@
class PreparedModelTest : public VersionedAidlUtilsTestBase {};
+const std::vector<nn::TokenValuePair> kHints = {nn::TokenValuePair{.token = 0, .value = {1}}};
+const std::vector<nn::ExtensionNameAndPrefix> kExtensionNameToPrefix = {
+ nn::ExtensionNameAndPrefix{.name = "com.android.nn_test", .prefix = 1}};
+auto makeFencedExecutionWithConfigResult(
+ const std::shared_ptr<MockFencedExecutionCallback>& callback) {
+ return [callback](const Request& /*request*/,
+ const std::vector<ndk::ScopedFileDescriptor>& /*waitFor*/,
+ const ExecutionConfig& /*config*/, int64_t /*deadline*/, int64_t /*duration*/,
+ FencedExecutionResult* fencedExecutionResult) {
+ *fencedExecutionResult = FencedExecutionResult{.callback = callback,
+ .syncFence = ndk::ScopedFileDescriptor(-1)};
+ return ndk::ScopedAStatus::ok();
+ };
+}
+
} // namespace
TEST_P(PreparedModelTest, invalidPreparedModel) {
@@ -82,6 +97,8 @@
}
TEST_P(PreparedModelTest, executeSync) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -96,7 +113,7 @@
DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
EXPECT_TRUE(result.has_value())
@@ -104,6 +121,8 @@
}
TEST_P(PreparedModelTest, executeSyncError) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -112,7 +131,7 @@
.WillOnce(Invoke(makeGeneralFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -120,6 +139,8 @@
}
TEST_P(PreparedModelTest, executeSyncTransportFailure) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -128,7 +149,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -136,6 +157,8 @@
}
TEST_P(PreparedModelTest, executeSyncDeadObject) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -144,7 +167,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -152,6 +175,8 @@
}
TEST_P(PreparedModelTest, executeFenced) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -165,7 +190,7 @@
.WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -181,6 +206,8 @@
}
TEST_P(PreparedModelTest, executeFencedCallbackError) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -195,7 +222,7 @@
.WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -211,6 +238,8 @@
}
TEST_P(PreparedModelTest, executeFencedError) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -219,7 +248,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -227,6 +256,8 @@
}
TEST_P(PreparedModelTest, executeFencedTransportFailure) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -235,7 +266,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -243,6 +274,8 @@
}
TEST_P(PreparedModelTest, executeFencedDeadObject) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -251,7 +284,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -276,7 +309,7 @@
DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -300,7 +333,7 @@
.WillOnce(Invoke(makeGeneralFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -322,7 +355,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -344,7 +377,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -372,7 +405,7 @@
.WillRepeatedly(Invoke(makeFencedExecutionResult(mockCallback)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -410,7 +443,7 @@
.WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -440,7 +473,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -462,7 +495,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -484,7 +517,7 @@
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
// create execution
- const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
ASSERT_TRUE(createResult.has_value())
<< "Failed with " << createResult.error().code << ": " << createResult.error().message;
ASSERT_NE(createResult.value(), nullptr);
@@ -495,6 +528,206 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
+TEST_P(PreparedModelTest, executeSyncWithConfig) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ const auto mockExecutionResult = ExecutionResult{
+ .outputSufficientSize = true,
+ .outputShapes = {},
+ .timing = kNoTiming,
+ };
+ EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<3>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
+
+ // run test
+ const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ EXPECT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST_P(PreparedModelTest, executeSyncWithConfigError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeGeneralFailure));
+
+ // run test
+ const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeSyncWithConfigTransportFailure) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // run test
+ const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeSyncWithConfigDeadObject) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // run test
+ const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfig) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
+ EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeFencedExecutionWithConfigResult(mockCallback)));
+
+ // run test
+ const auto result =
+ preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ const auto& [syncFence, callback] = result.value();
+ EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+ ASSERT_NE(callback, nullptr);
+
+ // get results from callback
+ const auto callbackResult = callback();
+ ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code << ": "
+ << callbackResult.error().message;
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigCallbackError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup call
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
+ Invoke(makeStatusOk))));
+ EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeFencedExecutionWithConfigResult(mockCallback)));
+
+ // run test
+ const auto result =
+ preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ const auto& [syncFence, callback] = result.value();
+ EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+ ASSERT_NE(callback, nullptr);
+
+ // verify callback failure
+ const auto callbackResult = callback();
+ ASSERT_FALSE(callbackResult.has_value());
+ EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+
+ // run test
+ const auto result =
+ preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigTransportFailure) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // run test
+ const auto result =
+ preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigDeadObject) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+ EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // run test
+ const auto result =
+ preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
TEST_P(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
@@ -567,13 +800,13 @@
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto mockExecution = ndk::SharedRefBase::make<MockExecution>();
- EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
.Times(1)
- .WillOnce(DoAll(SetArgPointee<3>(mockExecution), Invoke(makeStatusOk)));
+ .WillOnce(DoAll(SetArgPointee<2>(mockExecution), Invoke(makeStatusOk)));
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
- const auto result = preparedModel->createReusableExecution({}, {}, {});
+ const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -586,13 +819,13 @@
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
- const auto result = preparedModel->createReusableExecution({}, {}, {});
+ const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -604,13 +837,13 @@
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
- const auto result = preparedModel->createReusableExecution({}, {}, {});
+ const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -622,13 +855,13 @@
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
- const auto result = preparedModel->createReusableExecution({}, {}, {});
+ const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
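For context, the hunks above track IPreparedModel::createReusableExecution dropping its separate measureTiming/loopTimeoutDurationNs parameters in favor of a single ExecutionConfig argument. A minimal sketch of the new call shape follows; it is not part of the patch, the field names are read off the aggregate initializers and config.* accesses elsewhere in this change, the generated NDK headers are assumed, and the "negative duration means omitted" comment is an assumption based on the adapter's makeOptionalDuration helper.

    #include <memory>

    #include <aidl/android/hardware/neuralnetworks/ExecutionConfig.h>
    #include <aidl/android/hardware/neuralnetworks/IExecution.h>
    #include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
    #include <aidl/android/hardware/neuralnetworks/Request.h>
    #include <android/binder_auto_utils.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // Sketch only: create a reusable execution with the consolidated ExecutionConfig parcelable.
    ndk::ScopedAStatus makeReusableExecution(const std::shared_ptr<nnhal::IPreparedModel>& model,
                                             const nnhal::Request& request,
                                             std::shared_ptr<nnhal::IExecution>* execution) {
        nnhal::ExecutionConfig config;
        config.measureTiming = false;
        config.loopTimeoutDurationNs = -1;  // negative duration treated as "omitted" (assumption)
        // config.executionHints and config.extensionNameToPrefix stay empty.
        return model->createReusableExecution(request, config, execution);
    }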
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 2460fba..8c8a87a 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -63,6 +63,8 @@
// it is skipped. The field is set to true by default and is set to false in
// quantization coupling tests to suppress skipping a test
bool reportSkipping;
+ // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
+ bool useConfig;
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
bool reusable)
: executor(executor),
@@ -70,7 +72,8 @@
outputType(outputType),
memoryType(memoryType),
reusable(reusable),
- reportSkipping(true) {}
+ reportSkipping(true),
+ useConfig(false) {}
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
bool reusable, bool reportSkipping)
: executor(executor),
@@ -78,7 +81,17 @@
outputType(outputType),
memoryType(memoryType),
reusable(reusable),
- reportSkipping(reportSkipping) {}
+ reportSkipping(reportSkipping),
+ useConfig(false) {}
+ TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+ bool reusable, bool reportSkipping, bool useConfig)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ memoryType(memoryType),
+ reusable(reusable),
+ reportSkipping(reportSkipping),
+ useConfig(useConfig) {}
};
std::string toString(OutputType type) {
@@ -100,7 +113,8 @@
<< ", .measureTiming=" << (config.measureTiming ? "true" : "false")
<< ", .outputType=" << toString(config.outputType)
<< ", .memoryType=" << toString(config.memoryType)
- << ", .reusable=" << (config.reusable ? "true" : "false") << "}";
+ << ", .reusable=" << (config.reusable ? "true" : "false")
+ << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
return ss.str();
}
@@ -587,8 +601,8 @@
std::shared_ptr<IExecution> execution;
if (testConfig.reusable) {
- const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
- loopTimeoutDurationNs, &execution);
+ const auto ret = preparedModel->createReusableExecution(
+ request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
ASSERT_NE(nullptr, execution.get());
}
@@ -607,6 +621,10 @@
::ndk::ScopedAStatus ret;
if (testConfig.reusable) {
ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeSynchronouslyWithConfig(
+ request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, &executionResult);
} else {
ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
kNoDeadline, loopTimeoutDurationNs,
@@ -649,9 +667,16 @@
ExecutionResult executionResult;
// execute
- ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs,
- &executionResult);
+ if (testConfig.useConfig) {
+ ret = burst->executeSynchronouslyWithConfig(
+ request, slots,
+ {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
+ &executionResult);
+ } else {
+ ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+ kNoDeadline, loopTimeoutDurationNs,
+ &executionResult);
+ }
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
if (ret.isOk()) {
@@ -680,6 +705,10 @@
::ndk::ScopedAStatus ret;
if (testConfig.reusable) {
ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeFencedWithConfig(
+ request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, kNoDuration, &executionResult);
} else {
ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
kNoDeadline, loopTimeoutDurationNs,
@@ -697,9 +726,19 @@
waitFor.emplace_back(dupFd);
// If a sync fence is returned, try to start another run waiting for the sync
// fence.
- ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs,
- kNoDuration, &executionResult);
+ if (testConfig.reusable) {
+ ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
+ &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeFencedWithConfig(
+ request, waitFor,
+ {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, kNoDuration, &executionResult);
+ } else {
+ ret = preparedModel->executeFenced(
+ request, waitFor, testConfig.measureTiming, kNoDeadline,
+ loopTimeoutDurationNs, kNoDuration, &executionResult);
+ }
ASSERT_TRUE(ret.isOk());
waitForSyncFence(executionResult.syncFence.get());
}
@@ -830,11 +869,13 @@
std::vector<Executor> executorList;
std::vector<MemoryType> memoryTypeList;
std::vector<bool> reusableList = {false};
+ std::vector<bool> useConfigList = {false};
int deviceVersion;
ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
if (deviceVersion >= kMinAidlLevelForFL8) {
reusableList.push_back(true);
+ useConfigList.push_back(true);
}
switch (testKind) {
@@ -879,11 +920,14 @@
for (const Executor executor : executorList) {
for (const MemoryType memoryType : memoryTypeList) {
for (const bool reusable : reusableList) {
- if (executor == Executor::BURST && reusable) continue;
- const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
- reusable);
- SCOPED_TRACE(toString(testConfig));
- EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ for (const bool useConfig : useConfigList) {
+ if ((useConfig || executor == Executor::BURST) && reusable) continue;
+ const TestConfig testConfig(executor, measureTiming, outputType,
+ memoryType, reusable,
+ /*reportSkipping=*/true, useConfig);
+ SCOPED_TRACE(toString(testConfig));
+ EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ }
}
}
}
@@ -942,6 +986,13 @@
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+ int32_t deviceVersion;
+ ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+ if (deviceVersion >= kMinAidlLevelForFL8) {
+ createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/true,
+ /*useConfig=*/true);
+ EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+ }
} break;
case TestKind::QUANTIZATION_COUPLING: {
ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
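The new useConfig dimension above reruns every generated test through the *WithConfig entry points; the four-element brace init it passes corresponds to ExecutionConfig's {measureTiming, loopTimeoutDurationNs, executionHints, extensionNameToPrefix} fields in the order used throughout this change. A small sketch of the synchronous path spelled out field by field; illustrative only, with the generated NDK headers assumed.

    #include <memory>

    #include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
    #include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
    #include <aidl/android/hardware/neuralnetworks/Request.h>
    #include <android/binder_auto_utils.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // Sketch only: the useConfig == true synchronous branch, with the ExecutionConfig
    // aggregate written out explicitly instead of {measure, timeout, {}, {}}.
    ndk::ScopedAStatus runOnceWithConfig(const std::shared_ptr<nnhal::IPreparedModel>& model,
                                         const nnhal::Request& request, bool measureTiming,
                                         int64_t loopTimeoutDurationNs, int64_t deadlineNs,
                                         nnhal::ExecutionResult* result) {
        return model->executeSynchronouslyWithConfig(
                request,
                {/*measureTiming=*/measureTiming,
                 /*loopTimeoutDurationNs=*/loopTimeoutDurationNs,
                 /*executionHints=*/{},
                 /*extensionNameToPrefix=*/{}},
                deadlineNs, result);
    }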
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index b3e9c63..97760ae 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -204,11 +204,23 @@
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
+ ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request&, const ExecutionConfig&,
+ int64_t, ExecutionResult*) override {
+ return ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+ }
+ ndk::ScopedAStatus executeFencedWithConfig(const Request&,
+ const std::vector<ndk::ScopedFileDescriptor>&,
+ const ExecutionConfig&, int64_t, int64_t,
+ FencedExecutionResult*) override {
+ return ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+ }
ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
- ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, bool, int64_t,
+ ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, const ExecutionConfig&,
std::shared_ptr<aidl_hal::IExecution>*) override {
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index fdc7eff..931ba25 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -77,6 +77,28 @@
ASSERT_EQ(nullptr, preparedModel.get());
}
+static void validatePrepareModelWithConfig(const std::shared_ptr<IDevice>& device,
+ const std::string& message, const Model& model,
+ ExecutionPreference preference, Priority priority) {
+ SCOPED_TRACE(message + " [prepareModelWithConfig]");
+
+ std::shared_ptr<PreparedModelCallback> preparedModelCallback =
+ ndk::SharedRefBase::make<PreparedModelCallback>();
+ const auto prepareLaunchStatus = device->prepareModelWithConfig(
+ model, {preference, priority, kNoDeadline, {}, {}, kEmptyCacheToken, {}, {}},
+ preparedModelCallback);
+ ASSERT_FALSE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
+
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+ std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ ASSERT_EQ(nullptr, preparedModel.get());
+}
+
static bool validExecutionPreference(ExecutionPreference preference) {
return preference == ExecutionPreference::LOW_POWER ||
preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
@@ -103,6 +125,13 @@
}
validatePrepareModel(device, message, model, preference, priority);
+
+ int32_t aidlVersion;
+ ASSERT_TRUE(device->getInterfaceVersion(&aidlVersion).isOk());
+ if (aidlVersion >= kMinAidlLevelForFL8) {
+ // prepareModelWithConfig must satisfy all requirements enforced by prepareModel.
+ validatePrepareModelWithConfig(device, message, model, preference, priority);
+ }
}
static uint32_t addOperand(Model* model) {
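validatePrepareModelWithConfig above builds its PrepareModelConfig inline; reading that brace init against the config.* accesses in the adapter later in this patch gives the field order {preference, priority, deadlineNs, modelCache, dataCache, cacheToken, compilationHints, extensionNameToPrefix}. A hedged sketch of the same launch written out field by field; the MEDIUM default priority, the "-1 means no deadline" convention, and the all-zero default cache token are assumptions, not taken from the patch.

    #include <memory>

    #include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
    #include <aidl/android/hardware/neuralnetworks/IDevice.h>
    #include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
    #include <aidl/android/hardware/neuralnetworks/Model.h>
    #include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>
    #include <aidl/android/hardware/neuralnetworks/Priority.h>
    #include <android/binder_auto_utils.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // Sketch only: launch compilation through the new single-parcelable entry point.
    ndk::ScopedAStatus launchPrepare(const std::shared_ptr<nnhal::IDevice>& device,
                                     const nnhal::Model& model,
                                     const std::shared_ptr<nnhal::IPreparedModelCallback>& callback) {
        nnhal::PrepareModelConfig config;
        config.preference = nnhal::ExecutionPreference::FAST_SINGLE_ANSWER;
        config.priority = nnhal::Priority::MEDIUM;  // assumed default priority
        config.deadlineNs = -1;                     // negative deadline == "no deadline" (assumption)
        // modelCache, dataCache, compilationHints, extensionNameToPrefix stay empty;
        // cacheToken keeps its default value (all zeros here, by assumption).
        return device->prepareModelWithConfig(model, config, callback);
    }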
diff --git a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
index e8debf7..d749841 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
@@ -45,7 +45,7 @@
{
SCOPED_TRACE(message + " [createReusableExecution]");
const auto createStatus = preparedModel->createReusableExecution(
- request, measure, kOmittedTimeoutDuration, &execution);
+ request, {measure, kOmittedTimeoutDuration, {}, {}}, &execution);
if (!createStatus.isOk()) {
ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
@@ -149,10 +149,59 @@
int32_t aidlVersion;
ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
+ if (aidlVersion < kMinAidlLevelForFL8) {
+ return;
+ }
// validate reusable execution
- if (aidlVersion >= kMinAidlLevelForFL8) {
- validateReusableExecution(preparedModel, message, request, measure);
+ validateReusableExecution(preparedModel, message, request, measure);
+
+ // synchronous with empty hints
+ {
+ SCOPED_TRACE(message + " [executeSynchronouslyWithConfig]");
+ ExecutionResult executionResult;
+ const auto executeStatus = preparedModel->executeSynchronouslyWithConfig(
+ request, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, &executionResult);
+ ASSERT_FALSE(executeStatus.isOk());
+ ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
+ }
+
+ // fenced with empty hints
+ {
+ SCOPED_TRACE(message + " [executeFencedWithConfig]");
+ FencedExecutionResult executionResult;
+ const auto executeStatus = preparedModel->executeFencedWithConfig(
+ request, {}, {false, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, kNoDuration,
+ &executionResult);
+ ASSERT_FALSE(executeStatus.isOk());
+ ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
+ }
+
+ // burst with empty hints
+ {
+ SCOPED_TRACE(message + " [burst executeSynchronouslyWithConfig]");
+
+ // create burst
+ std::shared_ptr<IBurst> burst;
+ auto ret = preparedModel->configureExecutionBurst(&burst);
+ ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ ASSERT_NE(nullptr, burst.get());
+
+ // use -1 for all memory identifier tokens
+ const std::vector<int64_t> slots(request.pools.size(), -1);
+
+ ExecutionResult executionResult;
+ const auto executeStatus = burst->executeSynchronouslyWithConfig(
+ request, slots, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline,
+ &executionResult);
+ ASSERT_FALSE(executeStatus.isOk());
+ ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+ ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+ ErrorStatus::INVALID_ARGUMENT);
}
}
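The burst variant above is the only one that also needs memory identifier tokens; as in the hunk, every pool is given -1, i.e. no burst-cached identity. A short sketch of that call in isolation, illustrative only, with the IBurst signature taken from the Burst changes later in this patch and the generated NDK headers assumed.

    #include <memory>
    #include <vector>

    #include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
    #include <aidl/android/hardware/neuralnetworks/IBurst.h>
    #include <aidl/android/hardware/neuralnetworks/Request.h>
    #include <android/binder_auto_utils.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // Sketch only: synchronous burst execution through the ExecutionConfig overload,
    // with every request pool marked as "not cached" via a -1 identifier token.
    ndk::ScopedAStatus runBurstWithConfig(const std::shared_ptr<nnhal::IBurst>& burst,
                                          const nnhal::Request& request, int64_t deadlineNs,
                                          nnhal::ExecutionResult* result) {
        const std::vector<int64_t> slots(request.pools.size(), -1);
        return burst->executeSynchronouslyWithConfig(
                request, slots,
                {/*measureTiming=*/false, /*loopTimeoutDurationNs=*/-1,
                 /*executionHints=*/{}, /*extensionNameToPrefix=*/{}},
                deadlineNs, result);
    }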
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
index c417356..ad93e6d 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
@@ -41,7 +41,8 @@
// internal helper function
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
- std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping) {
+ std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping,
+ bool useConfig) {
ASSERT_NE(nullptr, preparedModel);
*preparedModel = nullptr;
@@ -56,11 +57,25 @@
// launch prepare model
const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
ndk::SharedRefBase::make<PreparedModelCallback>();
- const auto prepareLaunchStatus =
- device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
- kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
-
+ if (useConfig) {
+ const auto prepareLaunchStatus =
+ device->prepareModelWithConfig(model,
+ {ExecutionPreference::FAST_SINGLE_ANSWER,
+ kDefaultPriority,
+ kNoDeadline,
+ {},
+ {},
+ kEmptyCacheToken,
+ {},
+ {}},
+ preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+ } else {
+ const auto prepareLaunchStatus = device->prepareModel(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline, {},
+ {}, kEmptyCacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+ }
// retrieve prepared model
preparedModelCallback->wait();
const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
index a900590..00d705c 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
@@ -51,8 +51,8 @@
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
- std::shared_ptr<IPreparedModel>* preparedModel,
- bool reportSkipping = true);
+ std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping = true,
+ bool useConfig = false);
enum class Executor { SYNC, BURST, FENCED };
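With the extra defaulted parameter above, existing call sites keep compiling while the generated tests can opt into the prepareModelWithConfig path. A usage sketch, reusing the device/model fixture names and the createPreparedModel helper from this patch; illustrative only.

    // Sketch only: prepare the same model both ways inside a VTS test body.
    std::shared_ptr<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel);   // legacy prepareModel path
    // ... evaluate the prepared model ...
    createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/true,
                        /*useConfig=*/true);              // prepareModelWithConfig path
    // ... evaluate the config-prepared model the same way ...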
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
index f2687c4..8d42e2f 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
@@ -46,6 +46,11 @@
bool measureTiming, int64_t deadlineNs,
int64_t loopTimeoutDurationNs,
ExecutionResult* executionResult) override;
+ ndk::ScopedAStatus executeSynchronouslyWithConfig(
+ const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+ const ExecutionConfig& config, int64_t deadlineNs,
+ ExecutionResult* executionResult) override;
+
ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override;
class ThreadSafeMemoryCache {
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
index aa29d63..c94f270 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
@@ -31,6 +31,7 @@
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
#include <aidl/android/hardware/neuralnetworks/Model.h>
#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IDevice.h>
@@ -72,6 +73,9 @@
const std::vector<ndk::ScopedFileDescriptor>& dataCache,
const std::vector<uint8_t>& token,
const std::shared_ptr<IPreparedModelCallback>& callback) override;
+ ndk::ScopedAStatus prepareModelWithConfig(
+ const Model& model, const PrepareModelConfig& config,
+ const std::shared_ptr<IPreparedModelCallback>& callback) override;
protected:
const ::android::nn::SharedDevice kDevice;
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
index f92b0bc..d1359d6 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
@@ -51,9 +51,17 @@
int64_t loopTimeoutDurationNs, int64_t durationNs,
FencedExecutionResult* executionResult) override;
ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override;
- ndk::ScopedAStatus createReusableExecution(const Request& request, bool measureTiming,
- int64_t loopTimeoutDurationNs,
+ ndk::ScopedAStatus createReusableExecution(const Request& request,
+ const ExecutionConfig& config,
std::shared_ptr<IExecution>* execution) override;
+ ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request& request,
+ const ExecutionConfig& config,
+ int64_t deadlineNs,
+ ExecutionResult* executionResult) override;
+ ndk::ScopedAStatus executeFencedWithConfig(
+ const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ const ExecutionConfig& config, int64_t deadlineNs, int64_t durationNs,
+ FencedExecutionResult* executionResult) override;
::android::nn::SharedPreparedModel getUnderlyingPreparedModel() const;
diff --git a/neuralnetworks/utils/adapter/aidl/src/Burst.cpp b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
index 4fabb20..a4a80fa 100644
--- a/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
@@ -93,7 +93,8 @@
nn::ExecutionResult<ExecutionResult> executeSynchronously(
const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache, const Request& request,
const std::vector<int64_t>& memoryIdentifierTokens, bool measureTiming, int64_t deadlineNs,
- int64_t loopTimeoutDurationNs) {
+ int64_t loopTimeoutDurationNs, const std::vector<TokenValuePair>& hints,
+ const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
if (request.pools.size() != memoryIdentifierTokens.size()) {
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
<< "request.pools.size() != memoryIdentifierTokens.size()";
@@ -107,11 +108,13 @@
const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+ auto nnHints = NN_TRY(convertInput(hints));
+ auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
const auto hold = ensureAllMemoriesAreCached(&nnRequest, memoryIdentifierTokens, burst, cache);
- const auto result =
- burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+ const auto result = burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration,
+ nnHints, nnExtensionNameToPrefix);
if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
const auto& [message, code, outputShapes] = result.error();
@@ -155,7 +158,24 @@
ExecutionResult* executionResult) {
auto result =
adapter::executeSynchronously(*kBurst, kMemoryCache, request, memoryIdentifierTokens,
- measureTiming, deadlineNs, loopTimeoutDurationNs);
+ measureTiming, deadlineNs, loopTimeoutDurationNs, {}, {});
+ if (!result.has_value()) {
+ auto [message, code, _] = std::move(result).error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Burst::executeSynchronouslyWithConfig(
+ const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+ const ExecutionConfig& config, int64_t deadlineNs, ExecutionResult* executionResult) {
+ auto result = adapter::executeSynchronously(
+ *kBurst, kMemoryCache, request, memoryIdentifierTokens, config.measureTiming,
+ deadlineNs, config.loopTimeoutDurationNs, config.executionHints,
+ config.extensionNameToPrefix);
if (!result.has_value()) {
auto [message, code, _] = std::move(result).error();
const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
diff --git a/neuralnetworks/utils/adapter/aidl/src/Device.cpp b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
index 763be7f..84aaddb 100644
--- a/neuralnetworks/utils/adapter/aidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
@@ -148,13 +148,14 @@
}
}
-nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
- const Model& model, ExecutionPreference preference,
- Priority priority, int64_t deadlineNs,
- const std::vector<ndk::ScopedFileDescriptor>& modelCache,
- const std::vector<ndk::ScopedFileDescriptor>& dataCache,
- const std::vector<uint8_t>& token,
- const std::shared_ptr<IPreparedModelCallback>& callback) {
+nn::GeneralResult<void> prepareModel(
+ const nn::SharedDevice& device, const Executor& executor, const Model& model,
+ ExecutionPreference preference, Priority priority, int64_t deadlineNs,
+ const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
+ const std::vector<TokenValuePair>& hints,
+ const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
if (callback.get() == nullptr) {
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
}
@@ -166,12 +167,16 @@
auto nnModelCache = NN_TRY(convertInput(modelCache));
auto nnDataCache = NN_TRY(convertInput(dataCache));
const auto nnToken = NN_TRY(convertCacheToken(token));
+ auto nnHints = NN_TRY(convertInput(hints));
+ auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
- nnToken, callback] {
- auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
- nnModelCache, nnDataCache, nnToken);
+ nnToken, nnHints = std::move(nnHints),
+ nnExtensionNameToPrefix = std::move(nnExtensionNameToPrefix), callback] {
+ auto result =
+ device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline, nnModelCache,
+ nnDataCache, nnToken, nnHints, nnExtensionNameToPrefix);
notify(callback.get(), std::move(result));
};
executor(std::move(task), nnDeadline);
@@ -273,8 +278,9 @@
const std::vector<ndk::ScopedFileDescriptor>& dataCache,
const std::vector<uint8_t>& token,
const std::shared_ptr<IPreparedModelCallback>& callback) {
- const auto result = adapter::prepareModel(kDevice, kExecutor, model, preference, priority,
- deadlineNs, modelCache, dataCache, token, callback);
+ const auto result =
+ adapter::prepareModel(kDevice, kExecutor, model, preference, priority, deadlineNs,
+ modelCache, dataCache, token, {}, {}, callback);
if (!result.has_value()) {
const auto& [message, code] = result.error();
const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
@@ -301,4 +307,21 @@
return ndk::ScopedAStatus::ok();
}
+ndk::ScopedAStatus Device::prepareModelWithConfig(
+ const Model& model, const PrepareModelConfig& config,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
+ const auto result = adapter::prepareModel(
+ kDevice, kExecutor, model, config.preference, config.priority, config.deadlineNs,
+ config.modelCache, config.dataCache, config.cacheToken, config.compilationHints,
+ config.extensionNameToPrefix, callback);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ callback->notify(aidlCode, nullptr);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
} // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
index 5cab62c..790558f 100644
--- a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
@@ -118,17 +118,20 @@
return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
}
-nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IPreparedModel& preparedModel,
- const Request& request,
- bool measureTiming, int64_t deadlineNs,
- int64_t loopTimeoutDurationNs) {
+nn::ExecutionResult<ExecutionResult> executeSynchronously(
+ const nn::IPreparedModel& preparedModel, const Request& request, bool measureTiming,
+ int64_t deadlineNs, int64_t loopTimeoutDurationNs, const std::vector<TokenValuePair>& hints,
+ const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
const auto nnRequest = NN_TRY(convertInput(request));
const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+ auto nnHints = NN_TRY(convertInput(hints));
+ auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
const auto result =
- preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+ preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration,
+ nnHints, nnExtensionNameToPrefix);
if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
const auto& [message, code, outputShapes] = result.error();
@@ -147,16 +150,21 @@
nn::GeneralResult<FencedExecutionResult> executeFenced(
const nn::IPreparedModel& preparedModel, const Request& request,
const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measureTiming,
- int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs) {
+ int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
+ const std::vector<TokenValuePair>& hints,
+ const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
const auto nnRequest = NN_TRY(convertInput(request));
const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
const auto nnDuration = NN_TRY(makeOptionalDuration(durationNs));
+ auto nnHints = NN_TRY(convertInput(hints));
+ auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
auto [syncFence, executeFencedInfoCallback] = NN_TRY(preparedModel.executeFenced(
- nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+ nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration,
+ nnHints, nnExtensionNameToPrefix));
ndk::ScopedFileDescriptor fileDescriptor;
if (syncFence.hasFd()) {
@@ -171,11 +179,16 @@
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::IPreparedModel& preparedModel, const Request& request, bool measureTiming,
- int64_t loopTimeoutDurationNs) {
+ int64_t loopTimeoutDurationNs, const std::vector<TokenValuePair>& hints,
+ const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
const auto nnRequest = NN_TRY(convertInput(request));
const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
- return preparedModel.createReusableExecution(nnRequest, nnMeasureTiming, nnLoopTimeoutDuration);
+ auto nnHints = NN_TRY(convertInput(hints));
+ auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
+
+ return preparedModel.createReusableExecution(nnRequest, nnMeasureTiming, nnLoopTimeoutDuration,
+ nnHints, nnExtensionNameToPrefix);
}
nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IExecution& execution,
@@ -231,7 +244,7 @@
int64_t loopTimeoutDurationNs,
ExecutionResult* executionResult) {
auto result = adapter::executeSynchronously(*kPreparedModel, request, measureTiming, deadlineNs,
- loopTimeoutDurationNs);
+ loopTimeoutDurationNs, {}, {});
if (!result.has_value()) {
const auto& [message, code, _] = result.error();
const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
@@ -247,7 +260,41 @@
bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
FencedExecutionResult* executionResult) {
auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, measureTiming,
- deadlineNs, loopTimeoutDurationNs, durationNs);
+ deadlineNs, loopTimeoutDurationNs, durationNs, {}, {});
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::executeSynchronouslyWithConfig(const Request& request,
+ const ExecutionConfig& config,
+ int64_t deadlineNs,
+ ExecutionResult* executionResult) {
+ auto result = adapter::executeSynchronously(
+ *kPreparedModel, request, config.measureTiming, deadlineNs,
+ config.loopTimeoutDurationNs, config.executionHints, config.extensionNameToPrefix);
+ if (!result.has_value()) {
+ const auto& [message, code, _] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::executeFencedWithConfig(
+ const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ const ExecutionConfig& config, int64_t deadlineNs, int64_t durationNs,
+ FencedExecutionResult* executionResult) {
+ auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, config.measureTiming,
+ deadlineNs, config.loopTimeoutDurationNs, durationNs,
+ config.executionHints, config.extensionNameToPrefix);
if (!result.has_value()) {
const auto& [message, code] = result.error();
const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
@@ -275,11 +322,11 @@
}
ndk::ScopedAStatus PreparedModel::createReusableExecution(const Request& request,
- bool measureTiming,
- int64_t loopTimeoutDurationNs,
+ const ExecutionConfig& config,
std::shared_ptr<IExecution>* execution) {
- auto result = adapter::createReusableExecution(*kPreparedModel, request, measureTiming,
- loopTimeoutDurationNs);
+ auto result = adapter::createReusableExecution(
+ *kPreparedModel, request, config.measureTiming, config.loopTimeoutDurationNs,
+ config.executionHints, config.extensionNameToPrefix);
if (!result.has_value()) {
const auto& [message, code] = result.error();
const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
diff --git a/neuralnetworks/utils/adapter/hidl/Android.bp b/neuralnetworks/utils/adapter/hidl/Android.bp
index d073106..6875daa 100644
--- a/neuralnetworks/utils/adapter/hidl/Android.bp
+++ b/neuralnetworks/utils/adapter/hidl/Android.bp
@@ -30,17 +30,16 @@
local_include_dirs: ["include/nnapi/hal"],
export_include_dirs: ["include"],
static_libs: [
- "neuralnetworks_types",
- "neuralnetworks_utils_hal_1_0",
- "neuralnetworks_utils_hal_1_1",
- "neuralnetworks_utils_hal_1_2",
- "neuralnetworks_utils_hal_1_3",
- ],
- shared_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"android.hardware.neuralnetworks@1.3",
"libfmq",
+ "neuralnetworks_types",
+ "neuralnetworks_utils_hal_1_0",
+ "neuralnetworks_utils_hal_1_1",
+ "neuralnetworks_utils_hal_1_2",
+ "neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
],
}
diff --git a/neuralnetworks/utils/adapter/hidl/src/Burst.cpp b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
index 8b2e1dd..e3b165b 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
@@ -250,7 +250,7 @@
nn::MeasureTiming canonicalMeasure = NN_TRY(nn::convert(measure));
const auto [outputShapes, timing] =
- NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
+ NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}, {}, {}));
return std::make_pair(NN_TRY(V1_2::utils::convert(outputShapes)),
NN_TRY(V1_2::utils::convert(timing)));
diff --git a/neuralnetworks/utils/adapter/hidl/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
index 4993a80..0f44638 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -135,7 +135,7 @@
Task task = [device, nnModel = std::move(nnModel), executor, callback] {
auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
- nn::Priority::DEFAULT, {}, {}, {}, {});
+ nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
notify(callback.get(), std::move(result), executor);
};
executor(std::move(task), {});
@@ -155,8 +155,8 @@
const auto nnPreference = NN_TRY(convertInput(preference));
Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
- auto result =
- device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
+ auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {},
+ {}, {}, {});
notify(callback.get(), std::move(result), executor);
};
executor(std::move(task), {});
@@ -185,7 +185,7 @@
nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
nnToken, executor, callback] {
auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
- nnModelCache, nnDataCache, nnToken);
+ nnModelCache, nnDataCache, nnToken, {}, {});
notify(callback.get(), std::move(result), executor);
};
executor(std::move(task), {});
@@ -215,7 +215,7 @@
nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
nnToken, executor, callback] {
auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
- nnModelCache, nnDataCache, nnToken);
+ nnModelCache, nnDataCache, nnToken, {}, {});
notify(callback.get(), std::move(result), executor);
};
executor(std::move(task), nnDeadline);
diff --git a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
index 71060d5..c6055a6 100644
--- a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
@@ -159,7 +159,7 @@
}
Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
- auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
+ auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
notify(callback.get(), std::move(result));
};
executor(std::move(task), {});
@@ -185,7 +185,7 @@
}
Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
- auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
+ auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {});
notify(callback.get(), std::move(result));
};
executor(std::move(task), {});
@@ -216,8 +216,8 @@
Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
nnLoopTimeoutDuration, callback] {
- auto result =
- preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
+ auto result = preparedModel->execute(nnRequest, nnMeasure, nnDeadline,
+ nnLoopTimeoutDuration, {}, {});
notify(callback.get(), std::move(result));
};
executor(std::move(task), nnDeadline);
@@ -232,7 +232,7 @@
const auto nnMeasure = NN_TRY(convertInput(measure));
const auto [outputShapes, timing] =
- NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}));
+ NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {}));
auto hidlOutputShapes = NN_TRY(V1_2::utils::convert(outputShapes));
const auto hidlTiming = NN_TRY(V1_2::utils::convert(timing));
@@ -248,8 +248,8 @@
const auto nnDeadline = NN_TRY(convertInput(deadline));
const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
- const auto [outputShapes, timing] =
- NN_TRY(preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration));
+ const auto [outputShapes, timing] = NN_TRY(preparedModel->execute(
+ nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration, {}, {}));
auto hidlOutputShapes = NN_TRY(V1_3::utils::convert(outputShapes));
const auto hidlTiming = NN_TRY(V1_3::utils::convert(timing));
@@ -293,8 +293,9 @@
const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
const auto nnDuration = NN_TRY(convertInput(duration));
- auto [syncFence, executeFencedCallback] = NN_TRY(preparedModel->executeFenced(
- nnRequest, nnWaitFor, nnMeasure, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+ auto [syncFence, executeFencedCallback] =
+ NN_TRY(preparedModel->executeFenced(nnRequest, nnWaitFor, nnMeasure, nnDeadline,
+ nnLoopTimeoutDuration, nnDuration, {}, {}));
auto hidlSyncFence = NN_TRY(V1_3::utils::convert(syncFence.getSharedHandle()));
auto hidlExecuteFencedCallback = sp<FencedExecutionCallback>::make(executeFencedCallback);
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp
index 39927a3..bfba24f 100644
--- a/neuralnetworks/utils/common/Android.bp
+++ b/neuralnetworks/utils/common/Android.bp
@@ -39,20 +39,12 @@
srcs: ["test/*.cpp"],
static_libs: [
"libgmock",
- "libneuralnetworks_common",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
],
shared_libs: [
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
"libbase",
"libcutils",
- "libfmq",
- "libhidlbase",
- "libhidlmemory",
- "liblog",
- "libutils",
],
target: {
android: {
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
index e86edda..1f1245f 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
@@ -33,12 +33,15 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
};
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
index 5e62b9a..9582873 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
@@ -52,8 +52,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index de30aae..3f1f290 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -31,18 +31,23 @@
public:
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
index fde2486..129431f 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
@@ -48,18 +48,23 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
private:
bool isValidInternal() const EXCLUDES(mMutex);
nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const;
const Factory kMakeBurst;
mutable std::mutex mMutex;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 84ae799..267d634 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -65,8 +65,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache,
- const nn::CacheToken& token) const override;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
@@ -83,7 +84,9 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index 86533ed..bbfc220 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -49,18 +49,23 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedExecution> createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const override;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
@@ -70,7 +75,9 @@
bool isValidInternal() const EXCLUDES(mMutex);
nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const;
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& metaData,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
const Factory kMakePreparedModel;
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 0191533..3fdfb5c 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -34,13 +34,17 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute(
const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
const nn::OptionalTimePoint& /*deadline*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR() << "InvalidBurst";
}
nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR() << "InvalidBurst";
}
diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp
index 535ccb4..c8cc287 100644
--- a/neuralnetworks/utils/common/src/InvalidDevice.cpp
+++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp
@@ -84,7 +84,9 @@
const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/,
nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/,
const std::vector<nn::SharedHandle>& /*modelCache*/,
- const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR() << "InvalidDevice";
}
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 8195462..f6f978d 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -27,9 +27,12 @@
namespace android::hardware::neuralnetworks::utils {
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
- const nn::OptionalTimePoint& /*deadline*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+InvalidPreparedModel::execute(
+ const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR() << "InvalidPreparedModel";
}
@@ -38,13 +41,17 @@
const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
const nn::OptionalDuration& /*loopTimeoutDuration*/,
- const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR() << "InvalidPreparedModel";
}
nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
- const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ const nn::OptionalDuration& /*loopTimeoutDuration*/,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
return NN_ERROR() << "InvalidPreparedModel";
}
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 79cbe39..bf7a8ea 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -105,37 +105,49 @@
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
- const auto fn = [&request, measure, deadline, loopTimeoutDuration](const nn::IBurst& burst) {
- return burst.execute(request, measure, deadline, loopTimeoutDuration);
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+ const auto fn = [&request, measure, deadline, loopTimeoutDuration, &hints,
+ &extensionNameToPrefix](const nn::IBurst& burst) {
+ return burst.execute(request, measure, deadline, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
};
return protect(*this, fn);
}
nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
#if 0
auto self = shared_from_this();
- ResilientExecution::Factory makeExecution =
- [burst = std::move(self), request, measure, loopTimeoutDuration] {
- return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ ResilientExecution::Factory makeExecution = [burst = std::move(self), request, measure,
+ loopTimeoutDuration, &hints,
+ &extensionNameToPrefix] {
+ return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
};
return ResilientExecution::create(std::move(makeExecution));
#else
- return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
#endif
}
nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
if (!isValidInternal()) {
return std::make_shared<const InvalidExecution>();
}
- const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
- return burst.createReusableExecution(request, measure, loopTimeoutDuration);
+ const auto fn = [&request, measure, &loopTimeoutDuration, &hints,
+ &extensionNameToPrefix](const nn::IBurst& burst) {
+ return burst.createReusableExecution(request, measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
};
return protect(*this, fn);
}
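
The Resilient* wrappers above all follow the same shape: the call and its arguments are packed into a small lambda, and the lambda is handed to a protect() helper that can re-run it against a freshly recovered object if the underlying binder has died. The following self-contained sketch illustrates that pattern with hypothetical Backend/ResilientBackend types; it is not the actual neuralnetworks_utils_hal_common implementation.

#include <functional>
#include <string>
#include <utility>

enum class Status { OK, DEAD_OBJECT, GENERAL_FAILURE };

// Hypothetical stand-in for an IBurst/IDevice/IPreparedModel binder proxy.
struct Backend {
    Status execute(const std::string& /*request*/) const {
        return alive ? Status::OK : Status::DEAD_OBJECT;
    }
    bool alive = true;
};

class ResilientBackend {
  public:
    explicit ResilientBackend(std::function<Backend()> factory)
        : mFactory(std::move(factory)), mBackend(mFactory()) {}

    // Simplified analogue of protect(): run fn against the current backend,
    // and on DEAD_OBJECT recreate the backend once and retry.
    template <typename Fn>
    Status protect(const Fn& fn) {
        const Status status = fn(mBackend);
        if (status != Status::DEAD_OBJECT) return status;
        mBackend = mFactory();  // recover
        return fn(mBackend);    // retry exactly once
    }

    Status execute(const std::string& request) {
        const auto fn = [&request](const Backend& backend) {
            return backend.execute(request);
        };
        return protect(fn);
    }

  private:
    std::function<Backend()> mFactory;
    Backend mBackend;
};

Capturing the new hints and extensionNameToPrefix vectors by reference in the synchronous fn lambdas is safe because those lambdas never outlive the call; factories that are stored for later re-invocation need to capture copies instead.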
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 2023c9a..a5c2640 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -179,19 +179,21 @@
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
#if 0
auto self = shared_from_this();
ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model,
preference, priority, deadline, modelCache,
- dataCache, token] {
+ dataCache, token, hints, extensionNameToPrefix] {
return device->prepareModelInternal(model, preference, priority, deadline, modelCache,
- dataCache, token);
+ dataCache, token, hints, extensionNameToPrefix);
};
return ResilientPreparedModel::create(std::move(makePreparedModel));
#else
- return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache,
- token);
+ return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache, token,
+ hints, extensionNameToPrefix);
#endif
}
@@ -234,14 +236,16 @@
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
if (!isValidInternal()) {
return std::make_shared<const InvalidPreparedModel>();
}
- const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache,
- &token](const nn::IDevice& device) {
+ const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache, &token,
+ &hints, &extensionNameToPrefix](const nn::IDevice& device) {
return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
- token);
+ token, hints, extensionNameToPrefix);
};
return protect(*this, fn, /*blocking=*/false);
}
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 1ae19bc..b5843c0 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -104,43 +104,53 @@
}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
- const auto fn = [&request, measure, &deadline,
- &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
- return preparedModel.execute(request, measure, deadline, loopTimeoutDuration);
+ResilientPreparedModel::execute(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+ const auto fn = [&request, measure, &deadline, &loopTimeoutDuration, &hints,
+ &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
+ return preparedModel.execute(request, measure, deadline, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
};
return protect(*this, fn);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-ResilientPreparedModel::executeFenced(const nn::Request& request,
- const std::vector<nn::SyncFence>& waitFor,
- nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ResilientPreparedModel::executeFenced(
+ const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+ nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
const auto fn = [&request, &waitFor, measure, &deadline, &loopTimeoutDuration,
- &timeoutDurationAfterFence](const nn::IPreparedModel& preparedModel) {
+ &timeoutDurationAfterFence, &hints,
+ &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
return preparedModel.executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
- timeoutDurationAfterFence);
+ timeoutDurationAfterFence, hints, extensionNameToPrefix);
};
return protect(*this, fn);
}
nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
#if 0
auto self = shared_from_this();
- ResilientExecution::Factory makeExecution =
- [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
- return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ ResilientExecution::Factory makeExecution = [preparedModel = std::move(self), request, measure,
+ loopTimeoutDuration, hints,
+ extensionNameToPrefix] {
+ return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration,
+ hints, extensionNameToPrefix);
};
return ResilientExecution::create(std::move(makeExecution));
#else
- return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
#endif
}
@@ -159,13 +169,16 @@
nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
if (!isValidInternal()) {
return std::make_shared<const InvalidExecution>();
}
- const auto fn = [&request, measure,
- &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
- return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+ const auto fn = [&request, measure, &loopTimeoutDuration, &hints,
+ &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
+ return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration, hints,
+ extensionNameToPrefix);
};
return protect(*this, fn);
}
diff --git a/neuralnetworks/utils/common/test/MockDevice.h b/neuralnetworks/utils/common/test/MockDevice.h
index a9428bc..a0fc5c3 100644
--- a/neuralnetworks/utils/common/test/MockDevice.h
+++ b/neuralnetworks/utils/common/test/MockDevice.h
@@ -39,7 +39,9 @@
MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModel,
(const Model& model, ExecutionPreference preference, Priority priority,
OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
- const std::vector<SharedHandle>& dataCache, const CacheToken& token),
+ const std::vector<SharedHandle>& dataCache, const CacheToken& token,
+ const std::vector<TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
(const, override));
MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModelFromCache,
(OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index c8ce006..b8613b2 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -27,17 +27,23 @@
public:
MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), execute,
(const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalDuration& loopTimeoutDuration),
+ const OptionalDuration& loopTimeoutDuration,
+ const std::vector<TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
(const, override));
MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), executeFenced,
(const Request& request, const std::vector<SyncFence>& waitFor,
MeasureTiming measure, const OptionalTimePoint& deadline,
const OptionalDuration& loopTimeoutDuration,
- const OptionalDuration& timeoutDurationAfterFence),
+ const OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
(const, override));
MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
- (const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration),
+ (const Request& request, MeasureTiming measure,
+ const OptionalDuration& loopTimeoutDuration,
+ const std::vector<TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
(const, override));
MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
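
One point worth noting about the mock declarations above: gMock's MOCK_METHOD treats unparenthesized commas as argument separators, so any return type that itself contains a comma must be wrapped in an extra pair of parentheses, which is why the execute and executeFenced mocks parenthesize their ExecutionResult/GeneralResult pair types. A small self-contained illustration with hypothetical Model/MockModel classes (not part of this change):

#include <gmock/gmock.h>
#include <utility>
#include <vector>

class Model {
  public:
    virtual ~Model() = default;
    virtual std::pair<int, int> shape() const = 0;
    virtual int run(const std::vector<int>& inputs, bool measure) const = 0;
};

class MockModel : public Model {
  public:
    // Comma inside the return type: the whole type needs extra parentheses.
    MOCK_METHOD((std::pair<int, int>), shape, (), (const, override));
    // No comma in the return type: no extra parentheses required.
    MOCK_METHOD(int, run, (const std::vector<int>& inputs, bool measure),
                (const, override));
};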
diff --git a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
index 0488b63..d9b8505 100644
--- a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
@@ -309,12 +309,12 @@
// setup call
const auto [mockDevice, mockDeviceFactory, device] = setup();
const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>();
- EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+ EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(Return(mockPreparedModel));
// run test
- const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+ const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -324,12 +324,12 @@
TEST(ResilientDeviceTest, prepareModelError) {
// setup call
const auto [mockDevice, mockDeviceFactory, device] = setup();
- EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+ EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(kReturnGeneralFailure);
// run test
- const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+ const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -339,13 +339,13 @@
TEST(ResilientDeviceTest, prepareModelDeadObjectFailedRecovery) {
// setup call
const auto [mockDevice, mockDeviceFactory, device] = setup();
- EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+ EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(kReturnDeadObject);
EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(kReturnGeneralFailure);
// run test
- const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+ const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -355,18 +355,18 @@
TEST(ResilientDeviceTest, prepareModelDeadObjectSuccessfulRecovery) {
// setup call
const auto [mockDevice, mockDeviceFactory, device] = setup();
- EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+ EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(kReturnDeadObject);
const auto recoveredMockDevice = createConfiguredMockDevice();
const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>();
- EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _))
+ EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(Return(mockPreparedModel));
EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(Return(recoveredMockDevice));
// run test
- const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+ const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -679,7 +679,7 @@
device->recover(mockDevice.get(), /*blocking=*/false);
// run test
- auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+ auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
index d396ca8..276bfba 100644
--- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
@@ -104,12 +104,12 @@
TEST(ResilientPreparedModelTest, execute) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _))
.Times(1)
.WillOnce(Return(kNoExecutionError));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -119,10 +119,12 @@
TEST(ResilientPreparedModelTest, executeError) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
+ EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(kReturnGeneralFailure);
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -132,12 +134,12 @@
TEST(ResilientPreparedModelTest, executeDeadObjectFailedRecovery) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
+ EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
constexpr auto ret = [] { return nn::error(nn::ErrorStatus::GENERAL_FAILURE); };
EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(ret);
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -147,9 +149,9 @@
TEST(ResilientPreparedModelTest, executeDeadObjectSuccessfulRecovery) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
+ EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel();
- EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _))
+ EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _, _, _))
.Times(1)
.WillOnce(Return(kNoExecutionError));
EXPECT_CALL(*mockPreparedModelFactory, Call())
@@ -157,7 +159,7 @@
.WillOnce(Return(recoveredMockPreparedModel));
// run test
- const auto result = preparedModel->execute({}, {}, {}, {});
+ const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -167,12 +169,12 @@
TEST(ResilientPreparedModelTest, executeFenced) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(Return(kNoFencedExecutionError));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -182,12 +184,12 @@
TEST(ResilientPreparedModelTest, executeFencedError) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(kReturnGeneralFailure);
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -197,13 +199,13 @@
TEST(ResilientPreparedModelTest, executeFencedDeadObjectFailedRecovery) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(kReturnDeadObject);
EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
@@ -213,11 +215,11 @@
TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(kReturnDeadObject);
const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel();
- EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _))
+ EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(Return(kNoFencedExecutionError));
EXPECT_CALL(*mockPreparedModelFactory, Call())
@@ -225,7 +227,7 @@
.WillOnce(Return(recoveredMockPreparedModel));
// run test
- const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+ const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -235,12 +237,12 @@
TEST(ResilientPreparedModelTest, createReusableExecution) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _))
.Times(1)
.WillOnce(Return(kNoCreateReusableExecutionError));
// run test
- const auto result = preparedModel->createReusableExecution({}, {}, {});
+ const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
// verify result
ASSERT_TRUE(result.has_value())
@@ -250,12 +252,12 @@
TEST(ResilientPreparedModelTest, createReusableExecutionError) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
- EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _))
.Times(1)
.WillOnce(kReturnGeneralFailure);
// run test
- const auto result = preparedModel->createReusableExecution({}, {}, {});
+ const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
// verify result
ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
index c3272ae..452078b 100644
--- a/neuralnetworks/utils/service/Android.bp
+++ b/neuralnetworks/utils/service/Android.bp
@@ -33,6 +33,10 @@
local_include_dirs: ["include/nnapi/hal"],
export_include_dirs: ["include"],
static_libs: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
+ "android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"neuralnetworks_types",
"neuralnetworks_utils_hal_1_0",
"neuralnetworks_utils_hal_1_1",
@@ -40,10 +44,4 @@
"neuralnetworks_utils_hal_1_3",
"neuralnetworks_utils_hal_common",
],
- shared_libs: [
- "android.hardware.neuralnetworks@1.0",
- "android.hardware.neuralnetworks@1.1",
- "android.hardware.neuralnetworks@1.2",
- "android.hardware.neuralnetworks@1.3",
- ],
}
diff --git a/power/aidl/aidl_api/android.hardware.power/current/android/hardware/power/Mode.aidl b/power/aidl/aidl_api/android.hardware.power/current/android/hardware/power/Mode.aidl
index ba444a7..f38426b 100644
--- a/power/aidl/aidl_api/android.hardware.power/current/android/hardware/power/Mode.aidl
+++ b/power/aidl/aidl_api/android.hardware.power/current/android/hardware/power/Mode.aidl
@@ -49,5 +49,6 @@
CAMERA_STREAMING_LOW = 12,
CAMERA_STREAMING_MID = 13,
CAMERA_STREAMING_HIGH = 14,
- GAME_LOADING = 15,
+ GAME = 15,
+ GAME_LOADING = 16,
}
diff --git a/power/aidl/android/hardware/power/Mode.aidl b/power/aidl/android/hardware/power/Mode.aidl
index 2ebace1..cc4b130 100644
--- a/power/aidl/android/hardware/power/Mode.aidl
+++ b/power/aidl/android/hardware/power/Mode.aidl
@@ -164,6 +164,11 @@
CAMERA_STREAMING_HIGH,
/**
+ * This mode indicates that the user is playing a game.
+ */
+ GAME,
+
+ /**
* This mode indicates that the user is waiting for loading in a game.
*/
GAME_LOADING,
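
A hedged client-side sketch of how the new enumerator would be used: the "/default" instance name and the helper function are illustrative assumptions, while isModeSupported and setMode are the existing IPower entry points that take a Mode value.

#include <memory>
#include <string>

#include <aidl/android/hardware/power/IPower.h>
#include <android/binder_manager.h>

using aidl::android::hardware::power::IPower;
using aidl::android::hardware::power::Mode;

// Illustrative only: probe and enable the new GAME mode, doing nothing when
// the HAL does not support it.
void enterGameModeIfSupported() {
    const std::string instance = std::string(IPower::descriptor) + "/default";
    std::shared_ptr<IPower> power = IPower::fromBinder(
            ndk::SpAIBinder(AServiceManager_waitForService(instance.c_str())));
    if (power == nullptr) return;

    bool supported = false;
    if (power->isModeSupported(Mode::GAME, &supported).isOk() && supported) {
        power->setMode(Mode::GAME, /*enabled=*/true);  // oneway, best effort
    }
}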
diff --git a/sensors/1.0/default/convert.cpp b/sensors/1.0/default/convert.cpp
index 53ceb0d..43ee327 100644
--- a/sensors/1.0/default/convert.cpp
+++ b/sensors/1.0/default/convert.cpp
@@ -190,8 +190,6 @@
}
default: {
- CHECK_GE((int32_t)dst->sensorType, (int32_t)SensorType::DEVICE_PRIVATE_BASE);
-
memcpy(dst->u.data.data(), src.data, 16 * sizeof(float));
break;
}
@@ -330,9 +328,6 @@
}
default: {
- CHECK_GE((int32_t)src.sensorType,
- (int32_t)SensorType::DEVICE_PRIVATE_BASE);
-
memcpy(dst->data, src.u.data.data(), 16 * sizeof(float));
break;
}
diff --git a/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/Event.aidl b/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/Event.aidl
index c92ab1a..4f49002 100644
--- a/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/Event.aidl
+++ b/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/Event.aidl
@@ -52,6 +52,9 @@
android.hardware.sensors.AdditionalInfo additional;
android.hardware.sensors.Event.EventPayload.Data data;
android.hardware.sensors.Event.EventPayload.HeadTracker headTracker;
+ android.hardware.sensors.Event.EventPayload.LimitedAxesImu limitedAxesImu;
+ android.hardware.sensors.Event.EventPayload.LimitedAxesImuUncal limitedAxesImuUncal;
+ android.hardware.sensors.Event.EventPayload.Heading heading;
@FixedSize @VintfStability
parcelable Vec4 {
float x;
@@ -86,11 +89,37 @@
int discontinuityCount;
}
@FixedSize @VintfStability
+ parcelable LimitedAxesImu {
+ float x;
+ float y;
+ float z;
+ float xSupported;
+ float ySupported;
+ float zSupported;
+ }
+ @FixedSize @VintfStability
+ parcelable LimitedAxesImuUncal {
+ float x;
+ float y;
+ float z;
+ float xBias;
+ float yBias;
+ float zBias;
+ float xSupported;
+ float ySupported;
+ float zSupported;
+ }
+ @FixedSize @VintfStability
parcelable HeartRate {
float bpm;
android.hardware.sensors.SensorStatus status;
}
@FixedSize @VintfStability
+ parcelable Heading {
+ float heading;
+ float accuracy;
+ }
+ @FixedSize @VintfStability
parcelable MetaData {
android.hardware.sensors.Event.EventPayload.MetaData.MetaDataEventType what;
@Backing(type="int") @VintfStability
diff --git a/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/SensorType.aidl b/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/SensorType.aidl
index 3d7ab45..8c864e9 100644
--- a/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/SensorType.aidl
+++ b/sensors/aidl/aidl_api/android.hardware.sensors/current/android/hardware/sensors/SensorType.aidl
@@ -71,5 +71,10 @@
ACCELEROMETER_UNCALIBRATED = 35,
HINGE_ANGLE = 36,
HEAD_TRACKER = 37,
+ ACCELEROMETER_LIMITED_AXES = 38,
+ GYROSCOPE_LIMITED_AXES = 39,
+ ACCELEROMETER_LIMITED_AXES_UNCALIBRATED = 40,
+ GYROSCOPE_LIMITED_AXES_UNCALIBRATED = 41,
+ HEADING = 42,
DEVICE_PRIVATE_BASE = 65536,
}
diff --git a/sensors/aidl/android/hardware/sensors/Event.aidl b/sensors/aidl/android/hardware/sensors/Event.aidl
index fd6a8cc..e8550f1 100644
--- a/sensors/aidl/android/hardware/sensors/Event.aidl
+++ b/sensors/aidl/android/hardware/sensors/Event.aidl
@@ -132,6 +132,23 @@
*/
HeadTracker headTracker;
+ /**
+ * SensorType::ACCELEROMETER_LIMITED_AXES
+ * SensorType::GYROSCOPE_LIMITED_AXES
+ */
+ LimitedAxesImu limitedAxesImu;
+
+ /**
+ * SensorType::ACCELEROMETER_LIMITED_AXES_UNCALIBRATED
+ * SensorType::GYROSCOPE_LIMITED_AXES_UNCALIBRATED
+ */
+ LimitedAxesImuUncal limitedAxesImuUncal;
+
+ /**
+ * SensorType::HEADING
+ */
+ Heading heading;
+
@FixedSize
@VintfStability
parcelable Vec4 {
@@ -201,6 +218,70 @@
int discontinuityCount;
}
+ /**
+ * Payload of the ACCELEROMETER_LIMITED_AXES and GYROSCOPE_LIMITED_AXES
+ * sensor types.
+ */
+ @FixedSize
+ @VintfStability
+ parcelable LimitedAxesImu {
+ /**
+ * Acceleration or angular speed values. If certain axes are not
+ * supported, the associated value must be set to 0.
+ */
+ float x;
+ float y;
+ float z;
+
+ /**
+ * A limited axes sensor is not required to support all three axes.
+ * These fields indicate which axes are supported, using 1.0 for a
+ * supported axis and 0 for an unsupported axis. The set of supported
+ * axes should be determined at build time and these values must not
+ * change during runtime.
+ */
+ float xSupported;
+ float ySupported;
+ float zSupported;
+ }
+
+ /**
+ * Payload of the ACCELEROMETER_LIMITED_AXES_UNCALIBRATED and
+ * GYROSCOPE_LIMITED_AXES_UNCALIBRATED sensor types.
+ */
+ @FixedSize
+ @VintfStability
+ parcelable LimitedAxesImuUncal {
+ /**
+ * Acceleration (without bias compensation) or angular speed
+ * (without drift compensation) values. If certain axes are not
+ * supported, the associated value must be set to 0.
+ */
+ float x;
+ float y;
+ float z;
+
+ /**
+ * Estimated bias values for uncalibrated accelerometer or
+ * estimated drift values for uncalibrated gyroscope. If certain
+ * axes are not supported, the associated value must be set to 0.
+ */
+ float xBias;
+ float yBias;
+ float zBias;
+
+ /**
+ * A limited axes sensor is not required to support all three axes.
+ * These fields indicate which axes are supported, using 1.0 for a
+ * supported axis and 0 for an unsupported axis. The set of supported
+ * axes should be determined at build time and these values must not
+ * change during runtime.
+ */
+ float xSupported;
+ float ySupported;
+ float zSupported;
+ }
+
@FixedSize
@VintfStability
parcelable HeartRate {
@@ -218,6 +299,27 @@
@FixedSize
@VintfStability
+ parcelable Heading {
+ /**
+ * The direction in which the device is pointing relative to true
+ * north in degrees. The value must be between 0.0 (inclusive) and
+ * 360.0 (exclusive), with 0 indicating north, 90 east, 180 south,
+ * and 270 west.
+ */
+ float heading;
+ /**
+ * Accuracy is defined at 68% confidence. In the case where the
+ * underlying distribution is assumed Gaussian normal, this would be
+ * considered one standard deviation. For example, if the heading
+ * returns 60 degrees, and accuracy returns 10 degrees, then there
+ * is a 68 percent probability of the true heading being between 50
+ * degrees and 70 degrees.
+ */
+ float accuracy;
+ }
+
+ @FixedSize
+ @VintfStability
parcelable MetaData {
MetaDataEventType what;
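
A hedged HAL-side sketch of filling one of the new payloads according to the comments above: an unsupported axis reports 0 together with a fixed *Supported flag of 0, and the support flags never change at runtime. The helper name is hypothetical, and the set<Tag>() accessor is assumed to follow the same generated-union pattern used for FrontendStatus later in this change.

#include <cstdint>

#include <aidl/android/hardware/sensors/Event.h>
#include <aidl/android/hardware/sensors/SensorType.h>

using aidl::android::hardware::sensors::Event;
using aidl::android::hardware::sensors::SensorType;

// Illustrative only: a two-axis accelerometer reporting through the new
// ACCELEROMETER_LIMITED_AXES payload (z axis not present on this device).
Event makeLimitedAxesAccelEvent(int32_t handle, int64_t timestampNs, float x, float y) {
    Event event;
    event.timestamp = timestampNs;
    event.sensorHandle = handle;
    event.sensorType = SensorType::ACCELEROMETER_LIMITED_AXES;

    Event::EventPayload::LimitedAxesImu imu;
    imu.x = x;
    imu.y = y;
    imu.z = 0.0f;           // unsupported axis reports 0
    imu.xSupported = 1.0f;  // support flags are fixed at build time
    imu.ySupported = 1.0f;
    imu.zSupported = 0.0f;
    event.payload.set<Event::EventPayload::limitedAxesImu>(imu);
    return event;
}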
diff --git a/sensors/aidl/android/hardware/sensors/SensorType.aidl b/sensors/aidl/android/hardware/sensors/SensorType.aidl
index 01e6bee..9098894 100644
--- a/sensors/aidl/android/hardware/sensors/SensorType.aidl
+++ b/sensors/aidl/android/hardware/sensors/SensorType.aidl
@@ -667,6 +667,57 @@
HEAD_TRACKER = 37,
/**
+ * ACCELEROMETER_LIMITED_AXES
+ * reporting-mode: continuous
+ *
+ * Equivalent to ACCELEROMETER, but supporting cases where one or two axes
+ * are not supported.
+ */
+ ACCELEROMETER_LIMITED_AXES = 38,
+
+ /**
+ * GYROSCOPE_LIMITED_AXES
+ * reporting-mode: continuous
+ *
+ * Equivalent to GYROSCOPE, but supporting cases where one or two axes are
+ * not supported.
+ */
+ GYROSCOPE_LIMITED_AXES = 39,
+
+ /**
+ * ACCELEROMETER_LIMITED_AXES_UNCALIBRATED
+ * reporting-mode: continuous
+ *
+ * Equivalent to ACCELEROMETER_UNCALIBRATED, but supporting cases where one
+ * or two axes are not supported.
+ */
+ ACCELEROMETER_LIMITED_AXES_UNCALIBRATED = 40,
+
+ /**
+ * GYROSCOPE_LIMITED_AXES_UNCALIBRATED
+ * reporting-mode: continuous
+ *
+ * Equivalent to GYROSCOPE_UNCALIBRATED, but supporting cases where one or
+ * two axes are not supported.
+ */
+ GYROSCOPE_LIMITED_AXES_UNCALIBRATED = 41,
+
+ /**
+ * HEADING
+ * reporting-mode: continuous
+ *
+ * A sensor of this type measures the direction in which the device is
+ * pointing relative to true north in degrees.
+ *
+ * This sensor was added for automotive form factors. Other devices with
+ * a clear forward direction may find it useful as well. However, devices
+ * with a more ambiguous orientation, such as phones or wearables, should
+ * consider other sensors such as Sensor.TYPE_ROTATION_VECTOR, which may
+ * be more suitable.
+ */
+ HEADING = 42,
+
+ /**
* Base for device manufacturers private sensor types.
* These sensor types can't be exposed in the SDK.
*/
diff --git a/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
index 2a3fc64..618331b 100644
--- a/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
+++ b/soundtrigger/aidl/android/hardware/soundtrigger3/ISoundTriggerHw.aidl
@@ -18,15 +18,12 @@
import android.hardware.soundtrigger3.ISoundTriggerHwCallback;
import android.hardware.soundtrigger3.ISoundTriggerHwGlobalCallback;
-
+import android.media.soundtrigger.ModelParameter;
+import android.media.soundtrigger.ModelParameterRange;
import android.media.soundtrigger.PhraseSoundModel;
import android.media.soundtrigger.Properties;
import android.media.soundtrigger.RecognitionConfig;
import android.media.soundtrigger.SoundModel;
-import android.media.soundtrigger.ModelParameter;
-import android.media.soundtrigger.ModelParameterRange;
-import android.media.soundtrigger.Properties;
-import android.media.soundtrigger.RecognitionConfig;
/**
* SoundTrigger HAL interface. Used for hardware recognition of hotwords
@@ -196,12 +193,12 @@
* an audio stream associated with this recognition session.
* @param config A RecognitionConfig structure containing attributes of the recognition to
* perform.
- * @throws ServiceSpecificException(RESOURCE_CONTENTION) if the model cannot be started due
+ * @throws ServiceSpecificException(RESOURCE_CONTENTION) if the model cannot be started due
* to resource constraints. This is typically a temporary condition and the client may
* retry after the onResourcesAvailable() global callback is invoked.
- */
- void startRecognition(in int modelHandle, in int deviceHandle,
- in int ioHandle, in RecognitionConfig config);
+ */
+ void startRecognition(
+ in int modelHandle, in int deviceHandle, in int ioHandle, in RecognitionConfig config);
/**
* Stop recognition on a given model.
@@ -235,7 +232,8 @@
* @return This structure indicates supported attributes of the parameter for the given model
* handle. If the parameter is not supported, null is returned.
*/
- @nullable ModelParameterRange queryParameter(in int modelHandle, in ModelParameter modelParam);
+ @nullable ModelParameterRange queryParameter(
+ in int modelHandle, in ModelParameter modelParam);
/**
* Get a model specific parameter.
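
A hedged caller-side sketch of the RESOURCE_CONTENTION contract documented above: in the NDK backend the exception surfaces as a service-specific binder status, so a caller treats it as transient and retries once the onResourcesAvailable() global callback fires. The helper is illustrative only, and the specific error-code constant is deliberately not spelled out here.

#include <cstdint>
#include <memory>

#include <aidl/android/hardware/soundtrigger3/ISoundTriggerHw.h>
#include <android/binder_status.h>

using aidl::android::hardware::soundtrigger3::ISoundTriggerHw;
using aidl::android::media::soundtrigger::RecognitionConfig;

// Illustrative only: returns true on success; false means "not started",
// either permanently or pending an onResourcesAvailable() retry.
bool tryStartRecognition(const std::shared_ptr<ISoundTriggerHw>& hal, int32_t modelHandle,
                         int32_t deviceHandle, int32_t ioHandle,
                         const RecognitionConfig& config) {
    const ndk::ScopedAStatus status =
            hal->startRecognition(modelHandle, deviceHandle, ioHandle, config);
    if (status.isOk()) return true;
    if (status.getExceptionCode() == EX_SERVICE_SPECIFIC) {
        // Likely RESOURCE_CONTENTION: queue the request and retry after the
        // ISoundTriggerHwGlobalCallback::onResourcesAvailable() notification.
        return false;
    }
    return false;  // any other failure is treated as permanent in this sketch
}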
diff --git a/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatus.aidl b/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatus.aidl
index fc0efc9..1e0f5f0 100644
--- a/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatus.aidl
+++ b/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatus.aidl
@@ -76,4 +76,5 @@
android.hardware.tv.tuner.FrontendIsdbtPartialReceptionFlag partialReceptionFlag;
int[] streamIdList;
int[] dvbtCellIds;
+ android.hardware.tv.tuner.FrontendScanAtsc3PlpInfo[] allPlpInfo;
}
diff --git a/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatusType.aidl b/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatusType.aidl
index 2cc62d5..cd6ccb3 100644
--- a/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatusType.aidl
+++ b/tv/tuner/aidl/aidl_api/android.hardware.tv.tuner/current/android/hardware/tv/tuner/FrontendStatusType.aidl
@@ -76,4 +76,5 @@
ISDBT_PARTIAL_RECEPTION_FLAG = 38,
STREAM_ID_LIST = 39,
DVBT_CELL_IDS = 40,
+ ATSC3_ALL_PLP_INFO = 41,
}
diff --git a/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatus.aidl b/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatus.aidl
index ae6e46f..b5d0201 100644
--- a/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatus.aidl
+++ b/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatus.aidl
@@ -27,6 +27,7 @@
import android.hardware.tv.tuner.FrontendModulationStatus;
import android.hardware.tv.tuner.FrontendRollOff;
import android.hardware.tv.tuner.FrontendSpectralInversion;
+import android.hardware.tv.tuner.FrontendScanAtsc3PlpInfo;
import android.hardware.tv.tuner.FrontendStatusAtsc3PlpInfo;
import android.hardware.tv.tuner.FrontendTransmissionMode;
import android.hardware.tv.tuner.LnbVoltage;
@@ -241,5 +242,9 @@
*/
int[] dvbtCellIds;
-
+ /**
+ * A list of all PLPs in the frequency band for an ATSC3 frontend, which includes both
+ * tuned and untuned PLPs for the currently watched service.
+ */
+ FrontendScanAtsc3PlpInfo[] allPlpInfo;
}
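
A hedged client-side sketch of reading the new field back through the union accessors: the names allPlpInfo, plpId, and bLlsFlag come from this change, while the IFrontend::getStatus call and its out-parameter convention are assumptions about the generated NDK binding, and countLlsPlps is purely illustrative.

#include <memory>
#include <vector>

#include <aidl/android/hardware/tv/tuner/IFrontend.h>

using aidl::android::hardware::tv::tuner::FrontendScanAtsc3PlpInfo;
using aidl::android::hardware::tv::tuner::FrontendStatus;
using aidl::android::hardware::tv::tuner::FrontendStatusType;
using aidl::android::hardware::tv::tuner::IFrontend;

// Illustrative only: count how many PLPs in the band carry low level
// signalling, using the new ATSC3_ALL_PLP_INFO status.
int countLlsPlps(const std::shared_ptr<IFrontend>& frontend) {
    std::vector<FrontendStatus> statuses;
    if (!frontend->getStatus({FrontendStatusType::ATSC3_ALL_PLP_INFO}, &statuses).isOk() ||
        statuses.empty()) {
        return 0;
    }
    int llsPlps = 0;
    for (const FrontendScanAtsc3PlpInfo& plp : statuses[0].get<FrontendStatus::allPlpInfo>()) {
        if (plp.bLlsFlag) ++llsPlps;
    }
    return llsPlps;
}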
diff --git a/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatusType.aidl b/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatusType.aidl
index e7da517..8f3f2c5 100644
--- a/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatusType.aidl
+++ b/tv/tuner/aidl/android/hardware/tv/tuner/FrontendStatusType.aidl
@@ -130,7 +130,7 @@
RF_LOCK,
/**
- * PLP information in a frequency band for ATSC3.0 frontend.
+ * Currently tuned PLP information in a frequency band for an ATSC3 frontend.
*/
ATSC3_PLP_INFO,
@@ -222,10 +222,16 @@
/**
* Stream ID list included in a transponder.
*/
- STREAM_ID_LIST,
+ STREAM_ID_LIST,
- /**
- * DVB-T Cell Id.
- */
- DVBT_CELL_IDS,
+ /**
+ * DVB-T Cell Id.
+ */
+ DVBT_CELL_IDS,
+
+ /**
+ * All PLP information in a frequency band for an ATSC3 frontend, which includes both
+ * tuned and untuned PLPs for the currently watched service.
+ */
+ ATSC3_ALL_PLP_INFO,
}
diff --git a/tv/tuner/aidl/default/Frontend.cpp b/tv/tuner/aidl/default/Frontend.cpp
index 714612d..445d2b6 100644
--- a/tv/tuner/aidl/default/Frontend.cpp
+++ b/tv/tuner/aidl/default/Frontend.cpp
@@ -708,6 +708,20 @@
status.set<FrontendStatus::dvbtCellIds>(dvbtCellIds);
break;
}
+ case FrontendStatusType::ATSC3_ALL_PLP_INFO: {
+ FrontendScanAtsc3PlpInfo info1;
+ info1.plpId = 1;
+ info1.bLlsFlag = false;
+ FrontendScanAtsc3PlpInfo info2;
+ info2.plpId = 2;
+ info2.bLlsFlag = true;
+ FrontendScanAtsc3PlpInfo info3;
+ info3.plpId = 3;
+ info3.bLlsFlag = false;
+ vector<FrontendScanAtsc3PlpInfo> infos = {info1, info2, info3};
+ status.set<FrontendStatus::allPlpInfo>(infos);
+ break;
+ }
default: {
continue;
}
diff --git a/tv/tuner/aidl/default/Tuner.cpp b/tv/tuner/aidl/default/Tuner.cpp
index 48c1b66..7a5fa6e 100644
--- a/tv/tuner/aidl/default/Tuner.cpp
+++ b/tv/tuner/aidl/default/Tuner.cpp
@@ -77,6 +77,7 @@
FrontendStatusType::BERS,
FrontendStatusType::INTERLEAVINGS,
FrontendStatusType::BANDWIDTH,
+ FrontendStatusType::ATSC3_ALL_PLP_INFO,
};
mFrontendStatusCaps[1] = statusCaps;
mMaxUsableFrontends[FrontendType::ATSC3] = 1;
diff --git a/tv/tuner/aidl/vts/functional/FrontendTests.cpp b/tv/tuner/aidl/vts/functional/FrontendTests.cpp
index 41e98ea..a8799ab 100644
--- a/tv/tuner/aidl/vts/functional/FrontendTests.cpp
+++ b/tv/tuner/aidl/vts/functional/FrontendTests.cpp
@@ -414,6 +414,13 @@
expectStatuses[i].get<FrontendStatus::Tag::dvbtCellIds>().begin()));
break;
}
+ case FrontendStatusType::ATSC3_ALL_PLP_INFO: {
+ ASSERT_TRUE(std::equal(
+ realStatuses[i].get<FrontendStatus::Tag::allPlpInfo>().begin(),
+ realStatuses[i].get<FrontendStatus::Tag::allPlpInfo>().end(),
+ expectStatuses[i].get<FrontendStatus::Tag::allPlpInfo>().begin()));
+ break;
+ }
default: {
continue;
}