/* Common BPF helpers to be used by all BPF programs loaded by Android */

#include <linux/bpf.h>
#include <stdbool.h>
#include <stdint.h>

#include "bpf_map_def.h"

/******************************************************************************
 * WARNING: CHANGES TO THIS FILE OUTSIDE OF AOSP/MAIN ARE LIKELY TO BREAK    *
 * DEVICE COMPATIBILITY WITH MAINLINE MODULES SHIPPING EBPF CODE.            *
 *                                                                          *
 * THIS WILL LIKELY RESULT IN BRICKED DEVICES AT SOME ARBITRARY FUTURE TIME *
 *                                                                          *
 * THAT GOES ESPECIALLY FOR THE 'SECTION', 'LICENSE' AND 'CRITICAL' MACROS  *
 *                                                                          *
 * We strongly suggest that if you need changes to bpfloader functionality  *
 * you get your changes reviewed and accepted into aosp/main.               *
 *                                                                          *
 ******************************************************************************/

// The actual versions of the bpfloader that shipped in various Android releases

// Android P/Q/R: BpfLoader was initially part of netd,
// this was later split out into a standalone binary, but was unversioned.

// Android S / 12 (api level 31) - added 'tethering' mainline eBPF support
#define BPFLOADER_S_VERSION 2u

// Android T / 13 (api level 33) - support for shared/selinux_context/pindir
#define BPFLOADER_T_VERSION 19u

// BpfLoader v0.25+ supports obj@ver.o files
#define BPFLOADER_OBJ_AT_VER_VERSION 25u

// Bpfloader v0.33+ supports {map,prog}.ignore_on_{eng,user,userdebug}
#define BPFLOADER_IGNORED_ON_VERSION 33u

// Android U / 14 (api level 34) - various new program types added
#define BPFLOADER_U_VERSION 38u

// Android U QPR2 / 14 (api level 34) - platform only
// (note: the platform bpfloader in V isn't really versioned at all, since
// there is no need: it can only load objects compiled at the same time as
// itself and the rest of the platform)
#define BPFLOADER_U_QPR2_VERSION 41u
#define BPFLOADER_PLATFORM_VERSION BPFLOADER_U_QPR2_VERSION

// Android Mainline - this bpfloader should eventually go back to T (or even S)
// Note: this value and the following +1u's are hardcoded in NetBpfLoad.cpp
#define BPFLOADER_MAINLINE_VERSION 42u

// Android Mainline BpfLoader when running on Android T (sdk=33)
#define BPFLOADER_MAINLINE_T_VERSION (BPFLOADER_MAINLINE_VERSION + 1u)

// Android Mainline BpfLoader when running on Android U (sdk=34)
#define BPFLOADER_MAINLINE_U_VERSION (BPFLOADER_MAINLINE_T_VERSION + 1u)

// Android Mainline BpfLoader when running on Android U QPR3
#define BPFLOADER_MAINLINE_U_QPR3_VERSION (BPFLOADER_MAINLINE_U_VERSION + 1u)

// Android Mainline BpfLoader when running on Android V (sdk=35)
#define BPFLOADER_MAINLINE_V_VERSION (BPFLOADER_MAINLINE_U_QPR3_VERSION + 1u)

// Android Mainline BpfLoader when running on Android W (sdk=36)
#define BPFLOADER_MAINLINE_W_VERSION (BPFLOADER_MAINLINE_V_VERSION + 1u)

/* For mainline module use, you can #define BPFLOADER_{MIN/MAX}_VER
 * before #include "bpf_helpers.h" to change which bpfloaders will
 * process the resulting .o file.
 *
 * While this will work outside of mainline too, there is just no point to
 * using it when the .o and the bpfloader ship in sync with each other,
 * in which case it's best to just use the default.
 */
#ifndef BPFLOADER_MIN_VER
#define BPFLOADER_MIN_VER BPFLOADER_PLATFORM_VERSION  // inclusive, ie. >=
#endif

#ifndef BPFLOADER_MAX_VER
#define BPFLOADER_MAX_VER 0x10000u  // exclusive, ie. < v1.0
#endif
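
/* Illustrative sketch: a mainline-shipped .c file that must also be loadable
 * by the (updatable) mainline bpfloader on older releases might override the
 * default range before including this header, e.g.
 *
 *   #define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION
 *   #include "bpf_helpers.h"
 */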

/* place things in different elf sections */
#define SECTION(NAME) __attribute__((section(NAME), used))

/* Must be present in every program, example usage:
 * LICENSE("GPL"); or LICENSE("Apache 2.0");
 *
 * We also take this opportunity to embed a bunch of other useful values in
 * the resulting .o (This is to enable some limited forward compatibility
 * with mainline module shipped ebpf programs)
 *
 * The bpfloader_{min/max}_ver defines the [min, max) range of bpfloader
 * versions that should load this .o file (bpfloaders outside of this range
 * will simply ignore/skip this *entire* .o)
 * The [inclusive,exclusive) matches what we do for kernel ver dependencies.
 *
 * The size_of_bpf_{map,prog}_def allow the bpfloader to load programs where
 * these structures have been extended with additional fields (they will of
 * course simply be ignored then).
 *
 * If missing, bpfloader_{min/max}_ver default to 0/0x10000 ie. [v0.0, v1.0),
 * while size_of_bpf_{map/prog}_def default to 32/20 which are the v0.0 sizes.
 *
 * This macro also disables loading BTF map debug information, as versions
 * of the platform bpfloader that support BTF require a fork-exec of the
 * btfloader, which causes a regression in boot time.
 */
#define LICENSE(NAME) \
    unsigned int _bpfloader_min_ver SECTION("bpfloader_min_ver") = BPFLOADER_MIN_VER; \
    unsigned int _bpfloader_max_ver SECTION("bpfloader_max_ver") = BPFLOADER_MAX_VER; \
    size_t _size_of_bpf_map_def SECTION("size_of_bpf_map_def") = sizeof(struct bpf_map_def); \
    size_t _size_of_bpf_prog_def SECTION("size_of_bpf_prog_def") = sizeof(struct bpf_prog_def); \
    unsigned _btf_min_bpfloader_ver SECTION("btf_min_bpfloader_ver") = BPFLOADER_MAINLINE_VERSION; \
    unsigned _btf_user_min_bpfloader_ver SECTION("btf_user_min_bpfloader_ver") = 0xFFFFFFFFu; \
    char _license[] SECTION("license") = (NAME)

/* flag the resulting bpf .o file as critical to system functionality,
 * loading all kernel version appropriate programs in it must succeed
 * for bpfloader success
 */
#define CRITICAL(REASON) char _critical[] SECTION("critical") = (REASON)

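/* Illustrative sketch (hypothetical reason string): a .o file typically begins
 * with the two macros above; LICENSE is mandatory, CRITICAL only for objects
 * the system cannot function without:
 *
 *   LICENSE("Apache 2.0");
 *   CRITICAL("example network programs");
 */
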
/*
 * Helper functions called from eBPF programs written in C. These are
 * implemented in the kernel sources.
 */

struct kver_uint { unsigned int kver; };
#define KVER_(v) ((struct kver_uint){ .kver = (v) })
#define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
#define KVER_NONE KVER_(0)
#define KVER_4_14 KVER(4, 14, 0)
#define KVER_4_19 KVER(4, 19, 0)
#define KVER_5_4 KVER(5, 4, 0)
#define KVER_5_8 KVER(5, 8, 0)
#define KVER_5_9 KVER(5, 9, 0)
#define KVER_5_10 KVER(5, 10, 0)
#define KVER_5_15 KVER(5, 15, 0)
#define KVER_6_1 KVER(6, 1, 0)
#define KVER_6_6 KVER(6, 6, 0)
#define KVER_INF KVER_(0xFFFFFFFFu)

#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
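
/* Illustrative sketch: kernel version gating is usually done either by passing
 * these constants to the DEFINE_BPF_PROG_KVER* macros below, or by branching
 * on a compile time constant kernel version, e.g. (hypothetical helper name):
 *
 *   static inline bool supports_ringbuf(const struct kver_uint kver) {
 *       return KVER_IS_AT_LEAST(kver, 5, 8, 0);
 *   }
 */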

/*
 * BPFFS (ie. /sys/fs/bpf) labelling is as follows:
 * subdirectory    selinux context      mainline  usecase / usable by
 * /               fs_bpf               no [*]    core operating system (ie. platform)
 * /loader         fs_bpf_loader        no, U+    (as yet unused)
 * /net_private    fs_bpf_net_private   yes, T+   network_stack
 * /net_shared     fs_bpf_net_shared    yes, T+   network_stack & system_server
 * /netd_readonly  fs_bpf_netd_readonly yes, T+   network_stack & system_server & r/o to netd
 * /netd_shared    fs_bpf_netd_shared   yes, T+   network_stack & system_server & netd [**]
 * /tethering      fs_bpf_tethering     yes, S+   network_stack
 * /vendor         fs_bpf_vendor        no, T+    vendor
 *
 * [*] initial support for bpf was added back in P, but things worked
 *     differently back then: there was no bpfloader, and instead netd did
 *     everything by hand; the bpfloader with pinning into /sys/fs/bpf was
 *     (I believe) added in Q (and was definitely there in R).
 *
 * [**] additionally bpf programs are accessible to netutils_wrapper
 *      for use by iptables xt_bpf extensions.
 *
 * See cs/p:aosp-master%20-file:prebuilts/%20file:genfs_contexts%20"genfscon%20bpf"
 */

/* generic functions */

/*
 * Type-unsafe bpf map functions - avoid if possible.
 *
 * Using these it is possible to pass in keys/values of the wrong type/size,
 * or, for 'bpf_map_lookup_elem_unsafe', receive into a pointer to the wrong type.
 * You will not get a compile time failure, and for certain types of errors you
 * might not even get a failure from the kernel's ebpf verifier during program load;
 * instead stuff might just not work right at runtime.
 *
 * Instead please use:
 *   DEFINE_BPF_MAP(foo_map, TYPE, KeyType, ValueType, num_entries)
 * where TYPE can be something like HASH or ARRAY, and num_entries is an integer.
 *
 * This defines the map (hence this should not be used in a header file included
 * from multiple locations) and provides type safe accessors:
 *   ValueType * bpf_foo_map_lookup_elem(const KeyType *)
 *   int bpf_foo_map_update_elem(const KeyType *, const ValueType *, flags)
 *   int bpf_foo_map_delete_elem(const KeyType *)
 *
 * This will make sure that if you change the type of a map you'll get compile
 * errors at any spots you forget to update with the new type.
 *
 * Note: these all take pointers to const map because from the C/eBPF point of view
 * the map struct is really just a readonly map definition of the in kernel object.
 * Runtime modification of the map defining struct is meaningless, since
 * its contents are only ever used during bpf program loading & map creation
 * by the bpf loader, and not by the eBPF program itself.
 */
static void* (*bpf_map_lookup_elem_unsafe)(const struct bpf_map_def* map,
                                           const void* key) = (void*)BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem_unsafe)(const struct bpf_map_def* map, const void* key,
                                         const void* value, unsigned long long flags) = (void*)
        BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem_unsafe)(const struct bpf_map_def* map,
                                         const void* key) = (void*)BPF_FUNC_map_delete_elem;
static int (*bpf_ringbuf_output_unsafe)(const struct bpf_map_def* ringbuf,
                                        const void* data, __u64 size, __u64 flags) = (void*)
        BPF_FUNC_ringbuf_output;
static void* (*bpf_ringbuf_reserve_unsafe)(const struct bpf_map_def* ringbuf,
                                           __u64 size, __u64 flags) = (void*)
        BPF_FUNC_ringbuf_reserve;
static void (*bpf_ringbuf_submit_unsafe)(const void* data, __u64 flags) = (void*)
        BPF_FUNC_ringbuf_submit;
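
/* Illustrative sketch (assumed map name and types) of the generated type safe
 * accessors described above, as they would be used in a program's .c file:
 *
 *   DEFINE_BPF_MAP_GRW(example_packet_count_map, HASH, uint32_t, uint64_t, 64, AID_SYSTEM)
 *
 *   // somewhere inside a program:
 *   uint32_t key = 1;
 *   uint64_t* count = bpf_example_packet_count_map_lookup_elem(&key);
 *   if (count) {
 *       __sync_fetch_and_add(count, 1);
 *   } else {
 *       uint64_t one = 1;
 *       bpf_example_packet_count_map_update_elem(&key, &one, BPF_ANY);
 *   }
 */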

#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
    struct ____btf_map_##name { \
        type_key key; \
        type_val value; \
    }; \
    struct ____btf_map_##name \
    __attribute__ ((section(".maps." #name), used)) \
    ____btf_map_##name = { }

#define BPF_ASSERT_LOADER_VERSION(min_loader, ignore_eng, ignore_user, ignore_userdebug) \
    _Static_assert( \
        (min_loader) >= BPFLOADER_IGNORED_ON_VERSION || \
        !((ignore_eng).ignore_on_eng || \
          (ignore_user).ignore_on_user || \
          (ignore_userdebug).ignore_on_userdebug), \
        "bpfloader min version must be >= 0.33 in order to use ignored_on");

#define DEFINE_BPF_MAP_BASE(the_map, TYPE, keysize, valuesize, num_entries, \
                            usr, grp, md, selinux, pindir, share, minkver, \
                            maxkver, minloader, maxloader, ignore_eng, \
                            ignore_user, ignore_userdebug) \
    const struct bpf_map_def SECTION("maps") the_map = { \
        .type = BPF_MAP_TYPE_##TYPE, \
        .key_size = (keysize), \
        .value_size = (valuesize), \
        .max_entries = (num_entries), \
        .map_flags = 0, \
        .uid = (usr), \
        .gid = (grp), \
        .mode = (md), \
        .bpfloader_min_ver = (minloader), \
        .bpfloader_max_ver = (maxloader), \
        .min_kver = (minkver).kver, \
        .max_kver = (maxkver).kver, \
        .selinux_context = (selinux), \
        .pin_subdir = (pindir), \
        .shared = (share).shared, \
        .ignore_on_eng = (ignore_eng).ignore_on_eng, \
        .ignore_on_user = (ignore_user).ignore_on_user, \
        .ignore_on_userdebug = (ignore_userdebug).ignore_on_userdebug, \
    }; \
    BPF_ASSERT_LOADER_VERSION(minloader, ignore_eng, ignore_user, ignore_userdebug);

// Type safe macro to declare a ring buffer and related output functions.
// Compatibility:
// * BPF ring buffers are only available on kernels 5.8 and above. Any program
//   accessing the ring buffer should set a program level min_kver >= 5.8.
// * The definition below sets a map min_kver of 5.8, which requires targeting
//   a BPFLOADER_MIN_VER >= BPFLOADER_S_VERSION.
#define DEFINE_BPF_RINGBUF_EXT(the_map, ValueType, size_bytes, usr, grp, md, \
                               selinux, pindir, share, min_loader, max_loader, \
                               ignore_eng, ignore_user, ignore_userdebug) \
    DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md, \
                        selinux, pindir, share, KVER_5_8, KVER_INF, \
                        min_loader, max_loader, ignore_eng, ignore_user, \
                        ignore_userdebug); \
    \
    _Static_assert((size_bytes) >= 4096, "min 4 kiB ringbuffer size"); \
    _Static_assert((size_bytes) <= 0x10000000, "max 256 MiB ringbuffer size"); \
    _Static_assert(((size_bytes) & ((size_bytes) - 1)) == 0, \
                   "ring buffer size must be a power of two"); \
    \
    static inline __always_inline __unused int bpf_##the_map##_output( \
            const ValueType* v) { \
        return bpf_ringbuf_output_unsafe(&the_map, v, sizeof(*v), 0); \
    } \
    \
    static inline __always_inline __unused \
    ValueType* bpf_##the_map##_reserve() { \
        return bpf_ringbuf_reserve_unsafe(&the_map, sizeof(ValueType), 0); \
    } \
    \
    static inline __always_inline __unused void bpf_##the_map##_submit( \
            const ValueType* v) { \
        bpf_ringbuf_submit_unsafe(v, 0); \
    }

#define DEFINE_BPF_RINGBUF(the_map, ValueType, size_bytes, usr, grp, md) \
    DEFINE_BPF_RINGBUF_EXT(the_map, ValueType, size_bytes, usr, grp, md, \
                           DEFAULT_BPF_MAP_SELINUX_CONTEXT, DEFAULT_BPF_MAP_PIN_SUBDIR, \
                           PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \
                           LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
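
/* Illustrative sketch (hypothetical event struct and map name) of the two ways
 * to emit into a ring buffer defined via DEFINE_BPF_RINGBUF:
 *
 *   DEFINE_BPF_RINGBUF(example_events_ringbuf, struct example_event, 4096, AID_ROOT, AID_SYSTEM, 0660)
 *
 *   // copy-out style:
 *   struct example_event ev = { ... };
 *   bpf_example_events_ringbuf_output(&ev);
 *
 *   // reserve/submit style (avoids the extra copy):
 *   struct example_event* e = bpf_example_events_ringbuf_reserve();
 *   if (e) {
 *       e->some_field = 42;
 *       bpf_example_events_ringbuf_submit(e);
 *   }
 */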

/* There exist buggy kernels on devices shipping a pre-T OS that, due to the
 * kernel patch "[ALPS05162612] bpf: fix ubsan error", do not support userspace
 * writes into a non-zero index of bpf array maps.
 *
 * We use this assert to prevent us from being able to define such a map.
 */

#ifdef THIS_BPF_PROGRAM_IS_FOR_TEST_PURPOSES_ONLY
#define BPF_MAP_ASSERT_OK(type, entries, mode)
#elif BPFLOADER_MIN_VER >= BPFLOADER_T_VERSION
#define BPF_MAP_ASSERT_OK(type, entries, mode)
#else
#define BPF_MAP_ASSERT_OK(type, entries, mode) \
    _Static_assert(((type) != BPF_MAP_TYPE_ARRAY) || ((entries) <= 1) || !((mode) & 0222), \
                   "Writable arrays with more than 1 element not supported on pre-T devices.")
#endif
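
/* A minimal sketch of a map that passes the above assert even when the .o
 * targets pre-T loaders: a writable ARRAY map is fine as long as it has a
 * single entry (map name is hypothetical):
 *
 *   DEFINE_BPF_MAP_GRW(example_config_map, ARRAY, uint32_t, uint32_t, 1, AID_SYSTEM)
 */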

/* type safe macro to declare a map and related accessor functions */
#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, \
                           selinux, pindir, share, min_loader, max_loader, ignore_eng, \
                           ignore_user, ignore_userdebug) \
    DEFINE_BPF_MAP_BASE(the_map, TYPE, sizeof(KeyType), sizeof(ValueType), \
                        num_entries, usr, grp, md, selinux, pindir, share, \
                        KVER_NONE, KVER_INF, min_loader, max_loader, \
                        ignore_eng, ignore_user, ignore_userdebug); \
    BPF_MAP_ASSERT_OK(BPF_MAP_TYPE_##TYPE, (num_entries), (md)); \
    _Static_assert(sizeof(KeyType) < 1024, "aosp/2370288 requires < 1024 byte keys"); \
    _Static_assert(sizeof(ValueType) < 65536, "aosp/2370288 requires < 65536 byte values"); \
    BPF_ANNOTATE_KV_PAIR(the_map, KeyType, ValueType); \
    \
    static inline __always_inline __unused ValueType* bpf_##the_map##_lookup_elem( \
            const KeyType* k) { \
        return bpf_map_lookup_elem_unsafe(&the_map, k); \
    }; \
    \
    static inline __always_inline __unused int bpf_##the_map##_update_elem( \
            const KeyType* k, const ValueType* v, unsigned long long flags) { \
        return bpf_map_update_elem_unsafe(&the_map, k, v, flags); \
    }; \
    \
    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const KeyType* k) { \
        return bpf_map_delete_elem_unsafe(&the_map, k); \
    };

#ifndef DEFAULT_BPF_MAP_SELINUX_CONTEXT
#define DEFAULT_BPF_MAP_SELINUX_CONTEXT ""
#endif

#ifndef DEFAULT_BPF_MAP_PIN_SUBDIR
#define DEFAULT_BPF_MAP_PIN_SUBDIR ""
#endif

#ifndef DEFAULT_BPF_MAP_UID
#define DEFAULT_BPF_MAP_UID AID_ROOT
#elif BPFLOADER_MIN_VER < 28u
#error "Bpf Map UID must be left at default of AID_ROOT for BpfLoader prior to v0.28"
#endif
| 357 | |
Maciej Żenczykowski | 1ec8d7d | 2024-09-04 16:44:04 -0700 | [diff] [blame] | 358 | // for maps not meant to be accessed from userspace |
| 359 | #define DEFINE_BPF_MAP_KERNEL_INTERNAL(the_map, TYPE, KeyType, ValueType, num_entries) \ |
| 360 | DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, AID_ROOT, \ |
| 361 | 0000, "fs_bpf_loader", "", PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \ |
| 362 | LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG) |
| 363 | |
| 364 | #define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md) \ |
| 365 | DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, \ |
| 366 | DEFAULT_BPF_MAP_SELINUX_CONTEXT, DEFAULT_BPF_MAP_PIN_SUBDIR, \ |
| 367 | PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \ |
| 368 | LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG) |
Ken Chen | 45c7b15 | 2021-12-20 18:22:06 +0800 | [diff] [blame] | 369 | |
Maciej Żenczykowski | 53a144e | 2022-06-16 23:11:54 -0700 | [diff] [blame] | 370 | #define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \ |
Maciej Żenczykowski | 72e19c5 | 2022-07-08 14:22:57 -0700 | [diff] [blame] | 371 | DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \ |
| 372 | DEFAULT_BPF_MAP_UID, AID_ROOT, 0600) |
Ken Chen | 45c7b15 | 2021-12-20 18:22:06 +0800 | [diff] [blame] | 373 | |
Maciej Żenczykowski | dd3fe1d | 2022-10-20 04:05:00 +0000 | [diff] [blame] | 374 | #define DEFINE_BPF_MAP_RO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ |
| 375 | DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \ |
| 376 | DEFAULT_BPF_MAP_UID, gid, 0440) |
| 377 | |
Maciej Żenczykowski | 53a144e | 2022-06-16 23:11:54 -0700 | [diff] [blame] | 378 | #define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ |
Maciej Żenczykowski | 72e19c5 | 2022-07-08 14:22:57 -0700 | [diff] [blame] | 379 | DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \ |
| 380 | DEFAULT_BPF_MAP_UID, gid, 0620) |
Ken Chen | 45c7b15 | 2021-12-20 18:22:06 +0800 | [diff] [blame] | 381 | |
Maciej Żenczykowski | 53a144e | 2022-06-16 23:11:54 -0700 | [diff] [blame] | 382 | #define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ |
Maciej Żenczykowski | 72e19c5 | 2022-07-08 14:22:57 -0700 | [diff] [blame] | 383 | DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \ |
| 384 | DEFAULT_BPF_MAP_UID, gid, 0640) |
Maciej Żenczykowski | 53a144e | 2022-06-16 23:11:54 -0700 | [diff] [blame] | 385 | |
| 386 | #define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ |
Maciej Żenczykowski | 72e19c5 | 2022-07-08 14:22:57 -0700 | [diff] [blame] | 387 | DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \ |
| 388 | DEFAULT_BPF_MAP_UID, gid, 0660) |

// LLVM eBPF builtins: they directly generate BPF_LD_ABS/BPF_LD_IND (skb may be ignored?)
unsigned long long load_byte(void* skb, unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void* skb, unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void* skb, unsigned long long off) asm("llvm.bpf.load.word");
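// Illustrative sketch: these return the loaded value in host byte order, with the
// offset measured from the start of the packet, e.g. (socket filter over ethernet):
//   uint16_t ethertype = load_half(skb, 12);  // bytes 12-13 of the frame == ethertype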

static int (*bpf_probe_read)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read;
static int (*bpf_probe_read_str)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str;
static int (*bpf_probe_read_user)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_user;
static int (*bpf_probe_read_user_str)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_user_str;
static unsigned long long (*bpf_ktime_get_ns)(void) = (void*) BPF_FUNC_ktime_get_ns;
static unsigned long long (*bpf_ktime_get_boot_ns)(void) = (void*) BPF_FUNC_ktime_get_boot_ns;
static unsigned long long (*bpf_get_current_pid_tgid)(void) = (void*) BPF_FUNC_get_current_pid_tgid;
static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid;
static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id;
static long (*bpf_get_stackid)(void* ctx, void* map, uint64_t flags) = (void*) BPF_FUNC_get_stackid;
static long (*bpf_get_current_comm)(void* buf, uint32_t buf_size) = (void*) BPF_FUNC_get_current_comm;
// bpf_sk_fullsock requires 5.1+ kernel
static struct bpf_sock* (*bpf_sk_fullsock)(struct bpf_sock* sk) = (void*) BPF_FUNC_sk_fullsock;
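// Illustrative sketch: the pid/tgid and uid/gid helpers pack two 32 bit values
// into one 64 bit result, e.g.
//   uint32_t uid = (uint32_t)bpf_get_current_uid_gid();  // low half == uid
//   uint32_t tgid = bpf_get_current_pid_tgid() >> 32;    // high half == tgid (userspace "pid")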

// GPL only:
static int (*bpf_trace_printk)(const char* fmt, int fmt_size, ...) = (void*) BPF_FUNC_trace_printk;
#define bpf_printf(s, n...) bpf_trace_printk(s, sizeof(s), ## n)
// Note: bpf only supports up to 3 arguments; log via: bpf_printf("msg %d %d %d", 1, 2, 3);
// and read via the blocking: sudo cat /sys/kernel/debug/tracing/trace_pipe

#define DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                            min_loader, max_loader, opt, selinux, pindir, ignore_eng, \
                            ignore_user, ignore_userdebug) \
    const struct bpf_prog_def SECTION("progs") the_prog##_def = { \
        .uid = (prog_uid), \
        .gid = (prog_gid), \
        .min_kver = (min_kv).kver, \
        .max_kver = (max_kv).kver, \
        .optional = (opt).optional, \
        .bpfloader_min_ver = (min_loader), \
        .bpfloader_max_ver = (max_loader), \
        .selinux_context = (selinux), \
        .pin_subdir = (pindir), \
        .ignore_on_eng = (ignore_eng).ignore_on_eng, \
        .ignore_on_user = (ignore_user).ignore_on_user, \
        .ignore_on_userdebug = (ignore_userdebug).ignore_on_userdebug, \
    }; \
    SECTION(SECTION_NAME) \
    int the_prog

#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                       opt) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, opt, "", "", \
                        LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// Programs (here used in the sense of functions/sections) marked optional are allowed to fail
// to load (for example due to missing kernel patches).
// The bpfloader will just ignore these failures and continue processing the next section.
//
// A non-optional program (function/section) failing to load causes a failure and aborts
// processing of the entire .o; if the .o is additionally marked critical, this will result
// in the entire bpfloader process terminating with a failure and not setting the bpf.progs_loaded
// system property. This in turn results in waitForProgsLoaded() never finishing.
//
// ie. a non-optional program in a critical .o is mandatory for kernels matching the min/max kver.

// programs requiring a kernel version >= min_kv && < max_kv
#define DEFINE_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                   MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, \
                                            max_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                   OPTIONAL)

// programs requiring a kernel version >= min_kv
#define DEFINE_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
                                   MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
                                   OPTIONAL)

// programs with no kernel version requirements
#define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                   MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                   OPTIONAL)
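
/* Illustrative sketch (hypothetical section/program names) of defining a
 * program with these macros; the function body is ordinary restricted C that
 * the in-kernel verifier must accept:
 *
 *   DEFINE_BPF_PROG_KVER("schedcls/example_prog", AID_ROOT, AID_SYSTEM, example_prog, KVER_5_8)
 *   (struct __sk_buff* skb) {
 *       return TC_ACT_PIPE;  // e.g. "continue" for a schedcls (tc) program
 *   }
 */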