/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef MAINLINE
// BTF is incompatible with bpfloaders < v0.10, hence for S (v0.2) we must
// ship a different file than for later versions, but we need bpfloader v0.25+
// for obj@ver.o support
#define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION
#else /* MAINLINE */
// The resulting .o needs to load on the Android S bpfloader
#define BPFLOADER_MIN_VER BPFLOADER_S_VERSION
#define BPFLOADER_MAX_VER BPFLOADER_T_VERSION
#endif /* MAINLINE */

#include "bpf_net_helpers.h"
#include "offload.h"

// From kernel:include/net/ip.h
#define IP_DF 0x4000  // Flag: "Don't Fragment"

// ----- Helper functions for offsets to fields -----

// They all assume simple IP packets:
//   - no VLAN ethernet tags
//   - no IPv4 options (see IP4_HLEN/IP4_TCP_OFFSET/IP4_UDP_OFFSET)
//   - no IPv6 extension headers
//   - no TCP options (see TCP_HLEN)

//#define ETH_HLEN sizeof(struct ethhdr)
#define IP4_HLEN sizeof(struct iphdr)
#define IP6_HLEN sizeof(struct ipv6hdr)
#define TCP_HLEN sizeof(struct tcphdr)
#define UDP_HLEN sizeof(struct udphdr)

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP4_TCP_OFFSET(field) (IP4_HLEN + TCP_OFFSET(field))
#define IP4_UDP_OFFSET(field) (IP4_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L3 (IPv6) header
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)
#define IP6_TCP_OFFSET(field) (IP6_HLEN + TCP_OFFSET(field))
#define IP6_UDP_OFFSET(field) (IP6_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L2 (ie. Ethernet) header (which must be present)
#define ETH_IP4_OFFSET(field) (ETH_HLEN + IP4_OFFSET(field))
#define ETH_IP4_TCP_OFFSET(field) (ETH_HLEN + IP4_TCP_OFFSET(field))
#define ETH_IP4_UDP_OFFSET(field) (ETH_HLEN + IP4_UDP_OFFSET(field))
#define ETH_IP6_OFFSET(field) (ETH_HLEN + IP6_OFFSET(field))
#define ETH_IP6_TCP_OFFSET(field) (ETH_HLEN + IP6_TCP_OFFSET(field))
#define ETH_IP6_UDP_OFFSET(field) (ETH_HLEN + IP6_UDP_OFFSET(field))
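
// Illustrative example (arithmetic only, not relied on by the code below): with an ethernet
// header present and no IPv4 options, ETH_IP4_UDP_OFFSET(check) expands to
// ETH_HLEN + IP4_HLEN + offsetof(struct udphdr, check), ie. 14 + 20 + 6 == 40.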

// ----- Tethering Error Counters -----

// Note that pre-T devices with Mediatek chipsets may have a kernel bug (bad patch
// "[ALPS05162612] bpf: fix ubsan error") making it impossible to write to non-zero
// offset of bpf map ARRAYs.  This file (offload.o) loads on S+, but luckily this
// array is only written by bpf code, and only read by userspace.
DEFINE_BPF_MAP_RO(tether_error_map, ARRAY, uint32_t, uint32_t, BPF_TETHER_ERR__MAX, AID_NETWORK_STACK)

#define COUNT_AND_RETURN(counter, ret) do {                     \
    uint32_t code = BPF_TETHER_ERR_ ## counter;                 \
    uint32_t *count = bpf_tether_error_map_lookup_elem(&code);  \
    if (count) __sync_fetch_and_add(count, 1);                  \
    return ret;                                                 \
} while(0)

#define TC_DROP(counter) COUNT_AND_RETURN(counter, TC_ACT_SHOT)
#define TC_PUNT(counter) COUNT_AND_RETURN(counter, TC_ACT_PIPE)

#define XDP_DROP(counter) COUNT_AND_RETURN(counter, XDP_DROP)
#define XDP_PUNT(counter) COUNT_AND_RETURN(counter, XDP_PASS)
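
// For example, TC_PUNT(LOW_TTL) expands (via COUNT_AND_RETURN) to: bump
// tether_error_map[BPF_TETHER_ERR_LOW_TTL] if that entry exists, then return TC_ACT_PIPE
// so the packet falls through to the regular kernel stack (illustrative expansion only).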

// ----- Tethering Data Stats and Limits -----

// Tethering stats, indexed by upstream interface.
DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, TetherStatsKey, TetherStatsValue, 16, AID_NETWORK_STACK)

// Tethering data limit, indexed by upstream interface.
// (tethering allowed when stats[iif].rxBytes + stats[iif].txBytes < limit[iif])
DEFINE_BPF_MAP_GRW(tether_limit_map, HASH, TetherLimitKey, TetherLimitValue, 16, AID_NETWORK_STACK)

// ----- IPv6 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherDownstream64Value,
                   1024, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

static inline __always_inline int do_forward6(struct __sk_buff* skb,
                                              const struct rawip_bool rawip,
                                              const struct stream_bool stream,
                                              __unused const struct kver_uint kver) {
    const bool is_ethernet = !rawip.rawip;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;

    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Since the program never writes via DPA (direct packet access) auto-pull/unclone logic does
    // not trigger and thus we need to manually make sure we can read packet headers via DPA.
    // Note: this is a blind best effort pull, which may fail or pull less - this doesn't matter.
    // It has to be done early because it will invalidate any skb->data/data_end derived pointers.
    try_make_writable(skb, l2_header_size + IP6_HLEN + TCP_HLEN);

    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct ipv6hdr* ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_PIPE;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_PIPE;

    // IP version must be 6
    if (ip6->version != 6) TC_PUNT(INVALID_IPV6_VERSION);

    // Cannot decrement during forward if already zero or would be zero,
    // Let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip6->hop_limit <= 1) TC_PUNT(LOW_TTL);

    // If hardware offload is running and programming flows based on conntrack entries,
    // try not to interfere with it.
    if (ip6->nexthdr == IPPROTO_TCP) {
        struct tcphdr* tcph = (void*)(ip6 + 1);

        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip6) + sizeof(*tcph) > data_end)
            TC_PUNT(INVALID_TCP_HEADER);

        // Do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) TC_PUNT(TCPV6_CONTROL_PACKET);
    }

    // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
    __be32 src32 = ip6->saddr.s6_addr32[0];
    if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b:/32 incl. XLAT464 WKP
        (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        TC_PUNT(NON_GLOBAL_SRC);

    // Protect against forwarding packets destined to ::1 or fe80::/64 or other weirdness.
    __be32 dst32 = ip6->daddr.s6_addr32[0];
    if (dst32 != htonl(0x0064ff9b) &&                        // 64:ff9b:/32 incl. XLAT464 WKP
        (dst32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        TC_PUNT(NON_GLOBAL_DST);

    // In the upstream direction do not forward traffic within the same /64 subnet.
    if (!stream.down && (src32 == dst32) && (ip6->saddr.s6_addr32[1] == ip6->daddr.s6_addr32[1]))
        TC_PUNT(LOCAL_SRC_DST);

    TetherDownstream6Key kd = {
        .iif = skb->ifindex,
        .neigh6 = ip6->daddr,
    };

    TetherUpstream6Key ku = {
        .iif = skb->ifindex,
        // Retrieve the first 64 bits of the source IPv6 address in network order
        .src64 = *(uint64_t*)&(ip6->saddr.s6_addr32[0]),
    };
    if (is_ethernet) __builtin_memcpy(stream.down ? kd.dstMac : ku.dstMac, eth->h_dest, ETH_ALEN);

    Tether6Value* v = stream.down ? bpf_tether_downstream6_map_lookup_elem(&kd)
                                  : bpf_tether_upstream6_map_lookup_elem(&ku);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_PIPE;

    uint32_t stat_and_limit_k = stream.down ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) TC_PUNT(NO_STATS_ENTRY);

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) TC_PUNT(NO_LIMIT_ENTRY);

    // Required IPv6 minimum mtu is 1280, below that not clear what we should do, abort...
    if (v->pmtu < IPV6_MIN_MTU) TC_PUNT(BELOW_IPV6_MTU);

    // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
    uint64_t packets = 1;
    uint64_t L3_bytes = skb->len - l2_header_size;
    if (L3_bytes > v->pmtu) {
        const int tcp6_overhead = sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp6_overhead;
        const uint64_t payload = L3_bytes - tcp6_overhead;
        packets = (payload + mss - 1) / mss;
        L3_bytes = tcp6_overhead * packets + payload;
    }
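
    // Worked example (illustrative numbers only): a 4000 byte L3 GRO aggregate with
    // v->pmtu == 1500 gives tcp6_overhead == 40 + 20 + 12 == 72, mss == 1428,
    // payload == 3928, packets == 3, and L3_bytes is re-estimated as 72 * 3 + 3928 == 4144.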

    // Are we past the limit?  If so, then abort...
    // Note: will not overflow since u64 is 936 years even at 5Gbps.
    // Do not drop here.  Offload is just that, whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + L3_bytes > *limit_v) TC_PUNT(LIMIT_REACHED);

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(stream.down ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_PUNT(CHANGE_HEAD_FAILED);
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip6 = (void*)(eth + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip6) > data_end) {
            __sync_fetch_and_add(stream.down ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_DROP(TOO_SHORT);
        }
    };

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface.  ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // CHECKSUM_COMPLETE is a 16-bit one's complement sum,
    // thus corrections for it need to be done in 16-bit chunks at even offsets.
    // IPv6 nexthdr is at offset 6, while hop limit is at offset 7
    uint8_t old_hl = ip6->hop_limit;
    --ip6->hop_limit;
    uint8_t new_hl = ip6->hop_limit;

    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.
    bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));

    __sync_fetch_and_add(stream.down ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(stream.down ? &stat_v->rxBytes : &stat_v->txBytes, L3_bytes);

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

DEFINE_BPF_PROG("schedcls/tether_downstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_downstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, ETHER, DOWNSTREAM, KVER_NONE);
}

DEFINE_BPF_PROG("schedcls/tether_upstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_upstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, ETHER, UPSTREAM, KVER_NONE);
}

// Note: section names must be unique to prevent programs from appending to each other,
// so instead the bpf loader will strip everything past the final $ symbol when actually
// pinning the program into the filesystem.
//
// bpf_skb_change_head() is only present on 4.14+ and 2 trivial kernel patches are needed:
//   ANDROID: net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head
//   ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
// (the first of those has already been upstreamed)
//
// These were added to 4.14+ Android Common Kernel in R (including the original release of ACK 5.4)
// and there is a test in kernel/tests/net/test/bpf_test.py testSkbChangeHead()
// and in system/netd/tests/binder_test.cpp NetdBinderTest TetherOffloadForwarding.
//
// Hence, these mandatory (must load successfully) implementations for 4.14+ kernels:
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$4_14", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream6_rawip_4_14, KVER_4_14)
(struct __sk_buff* skb) {
    return do_forward6(skb, RAWIP, DOWNSTREAM, KVER_4_14);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$4_14", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream6_rawip_4_14, KVER_4_14)
(struct __sk_buff* skb) {
    return do_forward6(skb, RAWIP, UPSTREAM, KVER_4_14);
}

// and define no-op stubs for pre-4.14 kernels.
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER_4_14)
(__unused struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER_4_14)
(__unused struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

// ----- IPv4 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 1024, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 1024, AID_NETWORK_STACK)

static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
        const int l2_header_size, void* data, const void* data_end,
        struct ethhdr* eth, struct iphdr* ip, const struct rawip_bool rawip,
        const struct stream_bool stream, const struct updatetime_bool updatetime,
        const bool is_tcp, __unused const struct kver_uint kver) {
    const bool is_ethernet = !rawip.rawip;
    struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
    struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);

    if (is_tcp) {
        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end)
            TC_PUNT(SHORT_TCP_HEADER);

        // If hardware offload is running and programming flows based on conntrack entries, try not
        // to interfere with it, so do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) TC_PUNT(TCPV4_CONTROL_PACKET);
    } else { // UDP
        // Make sure we can get at the udp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end)
            TC_PUNT(SHORT_UDP_HEADER);

        // Skip handling of CHECKSUM_COMPLETE packets with udp checksum zero due to need for
        // additional updating of skb->csum (this could be fixed up manually with more effort).
        //
        // Note that the in-kernel implementation of 'int64_t bpf_csum_update(skb, u32 csum)' is:
        //   if (skb->ip_summed == CHECKSUM_COMPLETE)
        //     return (skb->csum = csum_add(skb->csum, csum));
        //   else
        //     return -ENOTSUPP;
        //
        // So this will punt any CHECKSUM_COMPLETE packet with a zero UDP checksum,
        // and leave all other packets unaffected (since it just at most adds zero to skb->csum).
        //
        // In practice this should almost never trigger because most nics do not generate
        // CHECKSUM_COMPLETE packets on receive - especially so for nics/drivers on a phone.
        //
        // Additionally since we're forwarding, in most cases the value of the skb->csum field
        // shouldn't matter (it's not used by physical nic egress).
        //
        // It only matters if we're ingressing through a CHECKSUM_COMPLETE capable nic
        // and egressing through a virtual interface looping back to the kernel itself
        // (ie. something like veth) where the CHECKSUM_COMPLETE/skb->csum can get reused
        // on ingress.
        //
        // If we were in the kernel we'd simply probably call
        //   void skb_checksum_complete_unset(struct sk_buff *skb) {
        //     if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE;
        //   }
        // here instead.  Perhaps there should be a bpf helper for that?
        if (!udph->check && (bpf_csum_update(skb, 0) >= 0)) TC_PUNT(UDP_CSUM_ZERO);
    }

    Tether4Key k = {
        .iif = skb->ifindex,
        .l4Proto = ip->protocol,
        .src4.s_addr = ip->saddr,
        .dst4.s_addr = ip->daddr,
        .srcPort = is_tcp ? tcph->source : udph->source,
        .dstPort = is_tcp ? tcph->dest : udph->dest,
    };
    if (is_ethernet) __builtin_memcpy(k.dstMac, eth->h_dest, ETH_ALEN);

    Tether4Value* v = stream.down ? bpf_tether_downstream4_map_lookup_elem(&k)
                                  : bpf_tether_upstream4_map_lookup_elem(&k);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_PIPE;

    uint32_t stat_and_limit_k = stream.down ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) TC_PUNT(NO_STATS_ENTRY);

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) TC_PUNT(NO_LIMIT_ENTRY);

    // Required IPv4 minimum mtu is 68, below that not clear what we should do, abort...
    if (v->pmtu < 68) TC_PUNT(BELOW_IPV4_MTU);

    // Approximate handling of TCP/IPv4 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
    uint64_t packets = 1;
    uint64_t L3_bytes = skb->len - l2_header_size;
    if (L3_bytes > v->pmtu) {
        const int tcp4_overhead = sizeof(struct iphdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp4_overhead;
        const uint64_t payload = L3_bytes - tcp4_overhead;
        packets = (payload + mss - 1) / mss;
        L3_bytes = tcp4_overhead * packets + payload;
    }
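
    // Worked example (illustrative numbers only): a 2900 byte L3 GRO aggregate with
    // v->pmtu == 1400 gives tcp4_overhead == 20 + 20 + 12 == 52, mss == 1348,
    // payload == 2848, packets == 3, and L3_bytes is re-estimated as 52 * 3 + 2848 == 3004.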

    // Are we past the limit?  If so, then abort...
    // Note: will not overflow since u64 is 936 years even at 5Gbps.
    // Do not drop here.  Offload is just that, whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + L3_bytes > *limit_v) TC_PUNT(LIMIT_REACHED);

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(stream.down ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_PUNT(CHANGE_HEAD_FAILED);
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip = (void*)(eth + 1);
        tcph = is_tcp ? (void*)(ip + 1) : NULL;
        udph = is_tcp ? NULL : (void*)(ip + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip) + (is_tcp ? sizeof(*tcph) : sizeof(*udph)) > data_end) {
            __sync_fetch_and_add(stream.down ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_DROP(TOO_SHORT);
        }
    };

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface.  ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    // Decrement the IPv4 TTL, we already know it's greater than 1.
    // u8 TTL field is followed by u8 protocol to make a u16 for ipv4 header checksum update.
    // Since we're keeping the ipv4 checksum valid (which means the checksum of the entire
    // ipv4 header remains 0), the overall checksum of the entire packet does not change.
    const int sz2 = sizeof(__be16);
    const __be16 old_ttl_proto = *(__be16 *)&ip->ttl;
    const __be16 new_ttl_proto = old_ttl_proto - htons(0x0100);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_ttl_proto, new_ttl_proto, sz2);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(ttl), &new_ttl_proto, sz2, 0);
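
    // For example (illustrative values only): with ttl == 64 and protocol == IPPROTO_TCP (6),
    // the 16-bit ttl/protocol word changes from 0x4006 to 0x3f06 in network byte order, and
    // bpf_l3_csum_replace() above patches the IPv4 header checksum for exactly that delta.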

    const int l4_offs_csum = is_tcp ? ETH_IP4_TCP_OFFSET(check) : ETH_IP4_UDP_OFFSET(check);
    const int sz4 = sizeof(__be32);
    // UDP 0 is special and stored as FFFF (this flag also causes a csum of 0 to be unmodified)
    const int l4_flags = is_tcp ? 0 : BPF_F_MARK_MANGLED_0;
    const __be32 old_daddr = k.dst4.s_addr;
    const __be32 old_saddr = k.src4.s_addr;
    const __be32 new_daddr = v->dst46.s6_addr32[3];
    const __be32 new_saddr = v->src46.s6_addr32[3];

    bpf_l4_csum_replace(skb, l4_offs_csum, old_daddr, new_daddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_daddr, new_daddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(daddr), &new_daddr, sz4, 0);

    bpf_l4_csum_replace(skb, l4_offs_csum, old_saddr, new_saddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_saddr, new_saddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(saddr), &new_saddr, sz4, 0);

    // The offsets for TCP and UDP ports: source (u16 @ L4 offset 0) & dest (u16 @ L4 offset 2) are
    // actually the same, so the compiler should just optimize them both down to a constant.
    bpf_l4_csum_replace(skb, l4_offs_csum, k.srcPort, v->srcPort, sz2 | l4_flags);
    bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(source) : ETH_IP4_UDP_OFFSET(source),
                        &v->srcPort, sz2, 0);

    bpf_l4_csum_replace(skb, l4_offs_csum, k.dstPort, v->dstPort, sz2 | l4_flags);
    bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(dest) : ETH_IP4_UDP_OFFSET(dest),
                        &v->dstPort, sz2, 0);

    // This requires the bpf_ktime_get_boot_ns() helper which was added in 5.8,
    // and backported to all Android Common Kernel 4.14+ trees.
    if (updatetime.updatetime) v->last_used = bpf_ktime_get_boot_ns();

    __sync_fetch_and_add(stream.down ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(stream.down ? &stat_v->rxBytes : &stat_v->txBytes, L3_bytes);

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

static inline __always_inline int do_forward4(struct __sk_buff* skb,
                                              const struct rawip_bool rawip,
                                              const struct stream_bool stream,
                                              const struct updatetime_bool updatetime,
                                              const struct kver_uint kver) {
    const bool is_ethernet = !rawip.rawip;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;

    // Must be meta-ethernet IPv4 frame
    if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_PIPE;

    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Since the program never writes via DPA (direct packet access) auto-pull/unclone logic does
    // not trigger and thus we need to manually make sure we can read packet headers via DPA.
    // Note: this is a blind best effort pull, which may fail or pull less - this doesn't matter.
    // It has to be done early because it will invalidate any skb->data/data_end derived pointers.
    try_make_writable(skb, l2_header_size + IP4_HLEN + TCP_HLEN);

    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct iphdr* ip = is_ethernet ? (void*)(eth + 1) : data;

    // Must have (ethernet and) ipv4 header
    if (data + l2_header_size + sizeof(*ip) > data_end) return TC_ACT_PIPE;

    // Ethertype - if present - must be IPv4
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IP))) return TC_ACT_PIPE;

    // IP version must be 4
    if (ip->version != 4) TC_PUNT(INVALID_IPV4_VERSION);

    // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
    if (ip->ihl != 5) TC_PUNT(HAS_IP_OPTIONS);

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (unsigned i = 0; i < sizeof(*ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
    if (sum4 != 0xFFFF) TC_PUNT(CHECKSUM);
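
    // For example (illustrative values only): if the ten 16-bit header words sum to 0x2FFFD,
    // the first fold gives 0xFFFD + 0x2 == 0xFFFF, the second leaves it unchanged, and the
    // header checksum verifies; any other final value punts the packet back to the stack.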

    // Minimum IPv4 total length is the size of the header
    if (ntohs(ip->tot_len) < sizeof(*ip)) TC_PUNT(TRUNCATED_IPV4);

    // We are incapable of dealing with IPv4 fragments
    if (ip->frag_off & ~htons(IP_DF)) TC_PUNT(IS_IP_FRAG);

    // Cannot decrement during forward if already zero or would be zero,
    // Let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip->ttl <= 1) TC_PUNT(LOW_TTL);

    // If we cannot update the 'last_used' field due to lack of bpf_ktime_get_boot_ns() helper,
    // then it is not safe to offload UDP due to the small conntrack timeouts, as such,
    // in such a situation we can only support TCP.  This also has the added nice benefit of
    // using a separate error counter, and thus making it obvious which version of the program
    // is loaded.
    if (!updatetime.updatetime && ip->protocol != IPPROTO_TCP) TC_PUNT(NON_TCP);

    // We do not support offloading anything besides IPv4 TCP and UDP, due to need for NAT,
    // but no need to check this if !updatetime due to check immediately above.
    if (updatetime.updatetime && (ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
        TC_PUNT(NON_TCP_UDP);

    // We want to make sure that the compiler will, in the !updatetime case, entirely optimize
    // out all the non-tcp logic.  Also note that at this point is_udp === !is_tcp.
    const bool is_tcp = !updatetime.updatetime || (ip->protocol == IPPROTO_TCP);

    // This is a bit of a hack to make things easier on the bpf verifier.
    // (In particular I believe the Linux 4.14 kernel's verifier can get confused later on about
    // what offsets into the packet are valid and can spuriously reject the program, this is
    // because it fails to realize that is_tcp && !is_tcp is impossible)
    //
    // For both TCP & UDP we'll need to read and modify the src/dst ports, which so happen to
    // always be in the first 4 bytes of the L4 header.  Additionally for UDP we'll need access
    // to the checksum field which is in bytes 7 and 8.  While for TCP we'll need to read the
    // TCP flags (at offset 13) and access to the checksum field (2 bytes at offset 16).
    // As such we *always* need access to at least 8 bytes.
    if (data + l2_header_size + sizeof(*ip) + 8 > data_end) TC_PUNT(SHORT_L4_HEADER);

    // We're forcing the compiler to emit two copies of the following code, optimized
    // separately for is_tcp being true or false.  This simplifies the resulting bpf
    // byte code sufficiently that the 4.14 bpf verifier is able to keep track of things.
    // Without this the (updatetime == true) case would fail to bpf verify on 4.14 even
    // if the underlying requisite kernel support (bpf_ktime_get_boot_ns) was backported.
    if (is_tcp) {
        return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
                                  rawip, stream, updatetime, /* is_tcp */ true, kver);
    } else {
        return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
                                  rawip, stream, updatetime, /* is_tcp */ false, kver);
    }
}

// Full featured (required) implementations for 5.8+ kernels (these are S+ by definition)

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_8", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_rawip_5_8, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_5_8);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_8", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_rawip_5_8, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_5_8);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_8", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_ether_5_8, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_5_8);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_ether_5_8, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_5_8);
}

// Full featured (optional) implementations for 4.14-S, 4.19-S & 5.4-S kernels
// (optional, because we need to be able to fallback for 4.14/4.19/5.4 pre-S kernels)

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$opt",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_downstream4_rawip_opt,
                                    KVER_4_14, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_4_14);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$opt",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_upstream4_rawip_opt,
                                    KVER_4_14, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_4_14);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$opt",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_downstream4_ether_opt,
                                    KVER_4_14, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_4_14);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_upstream4_ether_opt,
                                    KVER_4_14, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_4_14);
}

// Partial (TCP-only: will not update 'last_used' field) implementations for 4.14+ kernels.
// These will be loaded only if the above optional ones failed (loading of *these* must succeed
// for 5.4+, since that is always an R patched kernel).
//
// [Note: as a result TCP connections will not have their conntrack timeout refreshed, however,
// since /proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established defaults to 432000 (seconds),
// this in practice means they'll break only after 5 days.  This seems an acceptable trade-off.
//
// Additionally the kernel/tests change "net-test: add bpf_ktime_get_ns / bpf_ktime_get_boot_ns tests",
// which enforces and documents the required kernel cherrypicks, will make it pretty unlikely that
// many devices upgrading to S will end up relying on these fallback programs.]

// RAWIP: Required for 5.4-R kernels -- which always support bpf_skb_change_head().

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_rawip_5_4, KVER_5_4, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_5_4);
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_rawip_5_4, KVER_5_4, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_5_4);
}

// RAWIP: Optional for 4.14/4.19 (R) kernels -- which support bpf_skb_change_head().
// [Note: fallback for 4.14/4.19 (P/Q) kernels is below in stub section]

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_downstream4_rawip_4_14,
                                    KVER_4_14, KVER_5_4)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_upstream4_rawip_4_14,
                                    KVER_4_14, KVER_5_4)
(struct __sk_buff* skb) {
    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_4_14);
}

// ETHER: Required for 4.14-Q/R, 4.19-Q/R & 5.4-R kernels.

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$4_14", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_ether_4_14, KVER_4_14, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_ether_4_14, KVER_4_14, KVER_5_8)
(struct __sk_buff* skb) {
    return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER_4_14);
}

// Placeholder (no-op) implementations for older Q kernels

// RAWIP: 4.9-P/Q, 4.14-P/Q & 4.19-Q kernels -- without bpf_skb_change_head() for tc programs

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER_5_4)
(__unused struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER_5_4)
(__unused struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

// ETHER: 4.9-P/Q kernel

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER_4_14)
(__unused struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER_4_14)
(__unused struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

// ----- XDP Support -----

DEFINE_BPF_MAP_GRW(tether_dev_map, DEVMAP_HASH, uint32_t, uint32_t, 64, AID_NETWORK_STACK)

static inline __always_inline int do_xdp_forward6(__unused struct xdp_md *ctx,
        __unused const struct rawip_bool rawip, __unused const struct stream_bool stream) {
    return XDP_PASS;
}

static inline __always_inline int do_xdp_forward4(__unused struct xdp_md *ctx,
        __unused const struct rawip_bool rawip, __unused const struct stream_bool stream) {
    return XDP_PASS;
}

static inline __always_inline int do_xdp_forward_ether(struct xdp_md *ctx,
                                                        const struct stream_bool stream) {
    const void* data = (void*)(long)ctx->data;
    const void* data_end = (void*)(long)ctx->data_end;
    const struct ethhdr* eth = data;

    // Make sure we actually have an ethernet header
    if ((void*)(eth + 1) > data_end) return XDP_PASS;

    if (eth->h_proto == htons(ETH_P_IPV6))
        return do_xdp_forward6(ctx, ETHER, stream);
    if (eth->h_proto == htons(ETH_P_IP))
        return do_xdp_forward4(ctx, ETHER, stream);

    // Anything else we don't know how to handle...
    return XDP_PASS;
}

static inline __always_inline int do_xdp_forward_rawip(struct xdp_md *ctx,
                                                        const struct stream_bool stream) {
    const void* data = (void*)(long)ctx->data;
    const void* data_end = (void*)(long)ctx->data_end;

    // The top nibble of both IPv4 and IPv6 headers is the IP version.
    if (data_end - data < 1) return XDP_PASS;
    const uint8_t v = (*(uint8_t*)data) >> 4;

    if (v == 6) return do_xdp_forward6(ctx, RAWIP, stream);
    if (v == 4) return do_xdp_forward4(ctx, RAWIP, stream);

    // Anything else we don't know how to handle...
    return XDP_PASS;
}

#define DEFINE_XDP_PROG(str, func) \
    DEFINE_BPF_PROG_KVER(str, AID_ROOT, AID_NETWORK_STACK, func, KVER_5_9)(struct xdp_md *ctx)

DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                xdp_tether_downstream_ether) {
    return do_xdp_forward_ether(ctx, DOWNSTREAM);
}

DEFINE_XDP_PROG("xdp/tether_downstream_rawip",
                xdp_tether_downstream_rawip) {
    return do_xdp_forward_rawip(ctx, DOWNSTREAM);
}

DEFINE_XDP_PROG("xdp/tether_upstream_ether",
                xdp_tether_upstream_ether) {
    return do_xdp_forward_ether(ctx, UPSTREAM);
}

DEFINE_XDP_PROG("xdp/tether_upstream_rawip",
                xdp_tether_upstream_rawip) {
    return do_xdp_forward_rawip(ctx, UPSTREAM);
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity (Tethering)");
DISABLE_BTF_ON_USER_BUILDS();