/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on Android T+
#define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "netd.h"

// These values are used by the cgroup bpf filter programs only.
static const int DROP = 0;
static const int PASS = 1;
static const int DROP_UNLESS_DNS = 2;  // internal to our program
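// Note: cgroup_skb programs may only return 0 (drop) or 1 (pass) to the kernel;
// DROP_UNLESS_DNS never escapes this program: bpf_traffic_account() resolves it
// to DROP or PASS before returning.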

// These values are used by the xt_bpf match programs only.
static const int BPF_NOMATCH = 0;
static const int BPF_MATCH = 1;

// Used for 'bool enable_tracing'
static const bool TRACE_ON = true;
static const bool TRACE_OFF = false;

// offsetof(struct iphdr, ihl) -- but that's a bitfield
#define IPPROTO_IHL_OFF 0

// This is offsetof(struct tcphdr, "32 bit tcp flag field")
// The tcp flags are after be16 source, dest & be32 seq, ack_seq, hence 12 bytes in.
//
// Note that TCP_FLAG_{ACK,PSH,RST,SYN,FIN} are htonl(0x00{10,08,04,02,01}0000)
// see include/uapi/linux/tcp.h
#define TCP_FLAG32_OFF 12

#define TCP_FLAG8_OFF (TCP_FLAG32_OFF + 1)
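// (TCP_FLAG8_OFF, ie. offset 13, is the single header byte holding the eight one-bit
//  TCP flags: CWR, ECE, URG, ACK, PSH, RST, SYN, FIN.)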

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries)      \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,              \
                       AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "",        \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,                   \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// For maps netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries)      \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,              \
                       AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "",     \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,                   \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,         \
                       AID_ROOT, AID_NET_BW_ACCT, 0660)

// Bpf map arrays on creation are preinitialized to 0 and do not support deletion of a key,
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf)
// Additionally on newer kernels the bpf jit can optimize out the lookups.
// The only valid indexes are [0..CONFIGURATION_MAP_SIZE-1].
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)

// TODO: consider whether we can merge some of these maps,
// for example it might be possible to merge 2 or 3 of:
//   uid_counterset_map + uid_owner_map + uid_permission_map
DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(ingress_discard_map, HASH, IngressDiscardKey, IngressDiscardValue,
                       INGRESS_DISCARD_MAP_SIZE)
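
// Illustrative sketch only (not part of this program): userspace consumers such as
// netd read these maps through libbpf's syscall wrappers against the pinned map
// paths. The exact pin path below is an assumption for illustration.
//
//   #include <bpf/bpf.h>
//   int fd = bpf_obj_get("/sys/fs/bpf/netd_shared/map_netd_app_uid_stats_map");
//   uint32_t uid = 10123;  // hypothetical app uid
//   StatsValue value;
//   if (fd >= 0 && !bpf_map_lookup_elem(fd, &uid, &value)) {
//       // value.rxBytes / value.txBytes etc. now hold this uid's traffic counters
//   }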

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

// A single-element configuration array; packet tracing is enabled when 'true'.
DEFINE_BPF_MAP_EXT(packet_trace_enabled_map, ARRAY, uint32_t, bool, 1,
                   AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                   BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                   LOAD_ON_USER, LOAD_ON_USERDEBUG)

// A ring buffer on which packet information is pushed.
DEFINE_BPF_RINGBUF_EXT(packet_trace_ringbuf, PacketTrace, PACKET_TRACE_BUF_SIZE,
                       AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                       BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                       LOAD_ON_USER, LOAD_ON_USERDEBUG);

DEFINE_BPF_MAP_RO_NETD(data_saver_enabled_map, ARRAY, uint32_t, bool,
                       DATA_SAVER_ENABLED_MAP_SIZE)

// iptables xt_bpf programs need to be usable by both the netd and netutils_wrappers
// selinux contexts. This is because even non-xt_bpf iptables mutations are implemented as
// a full table dump, followed by an update in userspace, and then a reload into the kernel,
// where any already in-use xt_bpf matchers are serialized as the path to the pinned
// program (see XT_BPF_MODE_PATH_PINNED), and the iptables binary (or rather
// the kernel acting on its behalf) must be able to retrieve the pinned program
// for the reload to succeed.
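//
// For illustration only: once pinned, such a program is referenced from an iptables
// rule via xt_bpf's pinned-object mode; the pin path here is an assumption, not taken
// from this file:
//
//   iptables -A bw_raw_PREROUTING -m bpf \
//       --object-pinned /sys/fs/bpf/netd_shared/prog_netd_skfilter_ingress_xtbpf \
//       -j RETURN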
#define DEFINE_XTBPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)

// programs that need to be usable by netd, but not by netutils_wrappers
// (this is because these are currently attached by the mainline provided libnetd_updatable .so
//  which is loaded into netd and thus runs as the netd uid/gid/selinux context)
#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV, maxKV) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog,                               \
                        minKV, maxKV, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY,            \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
    DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF)

#define DEFINE_NETD_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE)

// programs that only need to be usable by the system server
#define DEFINE_SYS_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)                       \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF,      \
                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY,                      \
                        "fs_bpf_net_shared", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance" which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead).
 *
 * Especially since the number of packets is important for any future clat offload correction
 * (which adjusts upward by 20 bytes per packet to account for the ipv4 -> ipv6 header conversion).
 */
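
// Worked example of the adjustment implemented below (illustrative numbers only):
// a 30000-byte IPv4 TCP GSO skb has tcp_overhead = 20 (ip) + 20 (tcp) + 12 (timestamp
// option) = 52, mss = 1500 - 52 = 1448 and payload = 30000 - 52 = 29948, so
// packets = ceil(29948 / 1448) = 21 and bytes = 52 * 21 + 29948 = 31040.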
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                            \
    static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                              const TypeOfKey* const key,        \
                                                              const struct egress_bool egress,   \
                                                              const struct kver_uint kver) {     \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                              \
        if (!value) {                                                                            \
            StatsValue newValue = {};                                                            \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                      \
            value = bpf_##the_stats_map##_lookup_elem(key);                                      \
        }                                                                                        \
        if (value) {                                                                             \
            const int mtu = 1500;                                                                \
            uint64_t packets = 1;                                                                \
            uint64_t bytes = skb->len;                                                           \
            if (bytes > mtu) {                                                                   \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                             \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));     \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                     \
                int mss = mtu - tcp_overhead;                                                    \
                uint64_t payload = bytes - tcp_overhead;                                         \
                packets = (payload + mss - 1) / mss;                                             \
                bytes = tcp_overhead * packets + payload;                                        \
            }                                                                                    \
            if (egress.egress) {                                                                 \
                __sync_fetch_and_add(&value->txPackets, packets);                                \
                __sync_fetch_and_add(&value->txBytes, bytes);                                    \
            } else {                                                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                                \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                    \
            }                                                                                    \
        }                                                                                        \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

// Both bpf_skb_load_bytes() and bpf_skb_load_bytes_relative() (used below) return 0 on
// success or -EFAULT on failure (and zero out the destination buffer).
static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* const skb,
                                                         const int L3_off,
                                                         void* const to,
                                                         const int len,
                                                         const struct kver_uint kver) {
    // 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
    // ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
    //
    // 4.19+ kernels support the 'bpf_skb_load_bytes_relative()' bpf helper function,
    // so we can use it. On pre-4.19 kernels we cannot use the relative load helper,
    // and thus will simply get things wrong if there's any L2 (ethernet) header in the skb.
    //
    // Luckily, for cellular traffic, there likely isn't any, as cell is usually 'rawip'.
    //
    // However, this does mean that wifi (and ethernet) on 4.14 is basically a lost cause:
    // we'll be making decisions based on the *wrong* bytes (fetched from the wrong offset),
    // because the 'L3_off' passed to bpf_skb_load_bytes() should be increased by l2_header_size,
    // which for ethernet is 14 and not 0 like it is for rawip.
    //
    // For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
    // since those extend the ethernet header from 14 to 18 bytes.
    return KVER_IS_AT_LEAST(kver, 4, 19, 0)
           ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
           : bpf_skb_load_bytes(skb, L3_off, to, len);
}
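
// Since 'kver' is a compile-time constant, KVER_IS_AT_LEAST() folds at compile time and
// the untaken branch above is dead code: for example, the $4_19 program variants defined
// below are built with kver = KVER_4_19 and thus always use the relative load.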

static __always_inline inline void do_packet_tracing(
        const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid,
        const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
    if (!enable_tracing) return;
    if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;

    uint32_t mapKey = 0;
    bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
    if (traceConfig == NULL) return;
    if (*traceConfig == false) return;

    PacketTrace* pkt = bpf_packet_trace_ringbuf_reserve();
    if (pkt == NULL) return;

    // Errors from bpf_skb_load_bytes_net are ignored to favor returning something
    // over returning nothing. In the event of an error, the kernel will fill in
    // zero for the destination memory. Do not change the default '= 0' below.

    uint8_t proto = 0;
    uint8_t L4_off = 0;
    uint8_t ipVersion = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &L4_off, sizeof(L4_off), kver);
        L4_off = (L4_off & 0x0F) * 4;  // IHL calculation.
        ipVersion = 4;
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        L4_off = sizeof(struct ipv6hdr);
        ipVersion = 6;
        // skip over a *single* HOPOPTS or DSTOPTS extension header (if present)
        if (proto == IPPROTO_HOPOPTS || proto == IPPROTO_DSTOPTS) {
            struct {
                uint8_t proto, len;
            } ext_hdr;
            if (!bpf_skb_load_bytes_net(skb, L4_off, &ext_hdr, sizeof(ext_hdr), kver)) {
                proto = ext_hdr.proto;
                L4_off += (ext_hdr.len + 1) * 8;
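                // ext_hdr.len counts 8-byte units beyond the first 8 bytes (RFC 8200),
                // e.g. len == 0 means an 8-byte extension header, len == 1 means 16 bytes.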
            }
        }
    }

    uint8_t flags = 0;
    __be16 sport = 0, dport = 0;
    if (L4_off >= 20) {
        switch (proto) {
            case IPPROTO_TCP:
                (void)bpf_skb_load_bytes_net(skb, L4_off + TCP_FLAG8_OFF, &flags, sizeof(flags), kver);
                // fallthrough
            case IPPROTO_DCCP:
            case IPPROTO_UDP:
            case IPPROTO_UDPLITE:
            case IPPROTO_SCTP:
                // all of these L4 protocols start with be16 src & dst port
                (void)bpf_skb_load_bytes_net(skb, L4_off + 0, &sport, sizeof(sport), kver);
                (void)bpf_skb_load_bytes_net(skb, L4_off + 2, &dport, sizeof(dport), kver);
                break;
            case IPPROTO_ICMP:
            case IPPROTO_ICMPV6:
                // Both IPv4 and IPv6 icmp start with u8 type & code, which we store in the bottom
                // (ie. second) byte of sport/dport (which are be16s); the top byte is already zero.
                (void)bpf_skb_load_bytes_net(skb, L4_off + 0, (char *)&sport + 1, 1, kver);  // type
                (void)bpf_skb_load_bytes_net(skb, L4_off + 1, (char *)&dport + 1, 1, kver);  // code
                break;
        }
    }

    pkt->timestampNs = bpf_ktime_get_boot_ns();
    pkt->ifindex = skb->ifindex;
    pkt->length = skb->len;

    pkt->uid = uid;
    pkt->tag = tag;
    pkt->sport = sport;
    pkt->dport = dport;

    pkt->egress = egress.egress;
    pkt->wakeup = !egress.egress && (skb->mark & 0x80000000);  // Fwmark.ingress_cpu_wakeup
    pkt->ipProto = proto;
    pkt->tcpFlags = flags;
    pkt->ipVersion = ipVersion;

    bpf_packet_trace_ringbuf_submit(pkt);
}

static __always_inline inline bool skip_owner_match(struct __sk_buff* skb,
                                                    const struct egress_bool egress,
                                                    const struct kver_uint kver) {
    uint32_t flag = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        uint8_t ihl;
        // We don't check for success: this read cannot fail, as it is earlier in the packet
        // than proto, the reading of which must have succeeded. Additionally, the next read
        // (a little bit deeper in the packet, in spite of ihl being zeroed) of the tcp flags
        // field would also fail, and that failure we already handle correctly.
        // (We also don't check that ihl is in [0x45,0x4F] nor that the ipv4 header checksum
        //  is correct.)
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver);
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, (ihl & 0xF) * 4 + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, sizeof(struct ipv6hdr) + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else {
        return false;
    }
    // Always allow RSTs, and additionally allow ingress FINs
    return flag & (TCP_FLAG_RST | (egress.egress ? 0 : TCP_FLAG_FIN));  // false on read failure
}

static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
                                                          const struct kver_uint kver) {
    // Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative() which
    // provides reads relative to the L3 header. Without that we could fetch the wrong bytes.
    // Additionally earlier bpf verifiers are much harder to please.
    if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;

    IngressDiscardKey k = {};
    if (skb->protocol == htons(ETH_P_IP)) {
        k.daddr.s6_addr32[2] = htonl(0xFFFF);
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(daddr), &k.daddr.s6_addr32[3], 4, kver);
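        // ie. the IPv4 destination is keyed as an IPv4-mapped IPv6 address,
        // e.g. 1.2.3.4 is looked up as ::ffff:1.2.3.4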
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(daddr), &k.daddr, sizeof(k.daddr), kver);
    } else {
        return false;  // non IPv4/IPv6, so no IP to match on
    }

    // we didn't check for load success, because destination bytes will be zeroed if
    // bpf_skb_load_bytes_net() fails; instead we rely on daddr of '::' and '::ffff:0.0.0.0'
    // never being present in the map itself

    IngressDiscardValue* v = bpf_ingress_discard_map_lookup_elem(&k);
    if (!v) return false;  // lookup failure -> no protection in place -> allow
    // if (skb->ifindex == 1) return false;  // allow 'lo', but can't happen - see callsite
    if (skb->ifindex == v->iif[0]) return false;  // allowed interface
    if (skb->ifindex == v->iif[1]) return false;  // allowed interface
    return true;  // disallowed interface
}

static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
                                                  const struct egress_bool egress,
                                                  const struct kver_uint kver) {
    if (is_system_uid(uid)) return PASS;

    if (skip_owner_match(skb, egress, kver)) return PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    // BACKGROUND match does not apply to loopback traffic
    if (skb->ifindex == 1) enabledRules &= ~BACKGROUND_MATCH;

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (isBlockedByUidRules(enabledRules, uidRules)) return DROP;

    if (!egress.egress && skb->ifindex != 1) {
        if (ingress_should_discard(skb, kver)) return DROP;
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drop packets coming from neither lo nor the allowed interface;
                // allowed interface=0 is a wildcard and does not drop packets.
                return DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drop packets not coming from lo when the rule does not have IIF_MATCH but
            // does have LOCKDOWN_VPN_MATCH.
            return DROP_UNLESS_DNS;
        }
    }
    return PASS;
}

static __always_inline inline void update_stats_with_config(const uint32_t selectedMap,
                                                            const struct __sk_buff* const skb,
                                                            const StatsKey* const key,
                                                            const struct egress_bool egress,
                                                            const struct kver_uint kver) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, key, egress, kver);
    } else {
        update_stats_map_B(skb, key, egress, kver);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb,
                                                      const struct egress_bool egress,
                                                      const bool enable_tracing,
                                                      const struct kver_uint kver) {
    // sock_uid will be 'overflowuid' if !sk_fullsock(sk_to_full_sk(skb->sk))
    uint32_t sock_uid = bpf_get_socket_uid(skb);

    // The kernel's DEFAULT_OVERFLOWUID is 65534, the overflow 'nobody' uid.
    // Usually getting it back means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which is_system_uid().
    if (sock_uid == 65534) sock_uid = 0;

    uint64_t cookie = bpf_get_socket_cookie(skb);  // 0 iff !skb->sk
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // CLAT IPv6 TX sockets are *always* tagged with the CLAT uid, see tagSocketAsClat().
    // The CLAT daemon receives via an untagged AF_PACKET socket.
    if (egress.egress && uid == AID_CLAT) return PASS;

    int match = bpf_owner_match(skb, sock_uid, egress, kver);

    // Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
    // Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
    // and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == DROP_UNLESS_DNS) match = PASS;
    } else {
        if (match == DROP_UNLESS_DNS) match = DROP;
    }

    // If an outbound packet is going to be dropped, we do not count that traffic.
    if (egress.egress && (match == DROP)) return DROP;

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    if (!selectedMap) return PASS;  // cannot happen, needed to keep bpf verifier happy

    do_packet_tracing(skb, egress, uid, tag, enable_tracing, kver);
    update_stats_with_config(*selectedMap, skb, &key, egress, kver);
    update_app_uid_stats_map(skb, &uid, egress, kver);

    // We've already handled DROP_UNLESS_DNS up above, thus when we reach here the only
    // possible values of match are DROP(0) or PASS(1); however we need to use
    // "match &= 1" before 'return match' to help the kernel's bpf verifier,
    // so that it can be 100% certain that the returned value is always 0 or 1.
    // We use assembly so that it cannot be optimized out by a too smart compiler.
    asm("%0 &= 1" : "+r"(match));
    return match;
}

// This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace_user", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_ingress_trace_user, KVER_5_8, KVER_INF,
                    BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                    "fs_bpf_netd_readonly", "",
                    IGNORE_ON_ENG, LOAD_ON_USER, IGNORE_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
}

// This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_ingress_trace, KVER_5_8, KVER_INF,
                    BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                    "fs_bpf_netd_readonly", "",
                    LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE);
}

// This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace_user", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_egress_trace_user, KVER_5_8, KVER_INF,
                    BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                    "fs_bpf_netd_readonly", "",
                    IGNORE_ON_ENG, LOAD_ON_USER, IGNORE_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
}

// This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_egress_trace, KVER_5_8, KVER_INF,
                    BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                    "fs_bpf_netd_readonly", "",
                    LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all its traffic is accounted for already
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // CLAT sockets are created by system server and tagged as uid CLAT, see tagSocketAsClat().
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, EGRESS, KVER_NONE);
    return BPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted for here by virtue of the iptables raw prerouting
    // drop rule (in the clat_raw_PREROUTING chain), which triggers before this (in the
    // bw_raw_PREROUTING chain). It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    return BPF_MATCH;
}

DEFINE_SYS_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN,
                    tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // The kernel's DEFAULT_OVERFLOWUID is 65534, the overflow 'nobody' uid.
    // Usually getting it back means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which is_system_uid().
    if (sock_uid == 65534) return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

static __always_inline inline uint8_t get_app_permissions() {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to an app for all
     * users at install time, so we only check the appId part of a requesting uid
     * at run time. See UserHandle#isSameApp for details.
     */
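    // e.g. (illustrative) uid 1510123 (user 15, app 10123) and uid 10123 (user 0)
    // both yield appId 10123, since 1510123 % 100000 == 10123.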
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    // if UID not in map, then default to just INTERNET permission.
    return permissions ? *permissions : BPF_PERMISSION_INTERNET;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                          KVER_4_14)
(struct bpf_sock* sk) {
    // A return value of 1 means allow, everything else means deny.
    return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? 1 : 0;
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity and netd");
DISABLE_BTF_ON_USER_BUILDS();