/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/if.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>

// bionic kernel uapi linux/udp.h header is munged...
#define __kernel_udphdr udphdr
#include <linux/udp.h>

#ifdef BTF
// BTF is incompatible with bpfloaders < v0.10, hence for S (v0.2) we must
// ship a different file than for later versions, but we need bpfloader v0.25+
// for obj@ver.o support
#define BPFLOADER_MIN_VER BPFLOADER_OBJ_AT_VER_VERSION
#else /* BTF */
// The resulting .o needs to load on the Android S bpfloader
#define BPFLOADER_MIN_VER BPFLOADER_S_VERSION
#define BPFLOADER_MAX_VER BPFLOADER_OBJ_AT_VER_VERSION
#endif /* BTF */

// Warning: values other than AID_ROOT don't work for map uid on BpfLoader < v0.21
#define TETHERING_UID AID_ROOT

#ifdef INPROCESS
#define DEFAULT_BPF_MAP_SELINUX_CONTEXT "fs_bpf_net_shared"
#define DEFAULT_BPF_PROG_SELINUX_CONTEXT "fs_bpf_net_shared"
#define TETHERING_GID AID_SYSTEM
#else
#define TETHERING_GID AID_NETWORK_STACK
#endif

#include "bpf_helpers.h"
#include "bpf_net_helpers.h"
#include "offload.h"

// From kernel:include/net/ip.h
#define IP_DF 0x4000  // Flag: "Don't Fragment"

// ----- Helper functions for offsets to fields -----

// They all assume simple IP packets:
// - no VLAN ethernet tags
// - no IPv4 options (see IPV4_HLEN/TCP4_OFFSET/UDP4_OFFSET)
// - no IPv6 extension headers
// - no TCP options (see TCP_HLEN)

//#define ETH_HLEN sizeof(struct ethhdr)
#define IP4_HLEN sizeof(struct iphdr)
#define IP6_HLEN sizeof(struct ipv6hdr)
#define TCP_HLEN sizeof(struct tcphdr)
#define UDP_HLEN sizeof(struct udphdr)

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP4_TCP_OFFSET(field) (IP4_HLEN + TCP_OFFSET(field))
#define IP4_UDP_OFFSET(field) (IP4_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L3 (IPv6) header
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)
#define IP6_TCP_OFFSET(field) (IP6_HLEN + TCP_OFFSET(field))
#define IP6_UDP_OFFSET(field) (IP6_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L2 (ie. Ethernet) header (which must be present)
#define ETH_IP4_OFFSET(field) (ETH_HLEN + IP4_OFFSET(field))
#define ETH_IP4_TCP_OFFSET(field) (ETH_HLEN + IP4_TCP_OFFSET(field))
#define ETH_IP4_UDP_OFFSET(field) (ETH_HLEN + IP4_UDP_OFFSET(field))
#define ETH_IP6_OFFSET(field) (ETH_HLEN + IP6_OFFSET(field))
#define ETH_IP6_TCP_OFFSET(field) (ETH_HLEN + IP6_TCP_OFFSET(field))
#define ETH_IP6_UDP_OFFSET(field) (ETH_HLEN + IP6_UDP_OFFSET(field))

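// As a quick illustration of the macros above (the numbers assume the usual 14-byte ethernet,
// 20-byte IPv4, 40-byte IPv6, 20-byte TCP and 8-byte UDP headers with no options):
//   ETH_IP4_TCP_OFFSET(check) == 14 + 20 + 16 == 50
//   ETH_IP6_UDP_OFFSET(check) == 14 + 40 + 6  == 60
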
// ----- Tethering Error Counters -----

// Note that pre-T devices with Mediatek chipsets may have a kernel bug (bad patch
// "[ALPS05162612] bpf: fix ubsan error") making it impossible to write to non-zero
// offset of bpf map ARRAYs. This file (offload.o) loads on S+, but luckily this
// array is only written by bpf code, and only read by userspace.
DEFINE_BPF_MAP_RO(tether_error_map, ARRAY, uint32_t, uint32_t, BPF_TETHER_ERR__MAX, TETHERING_GID)

#define COUNT_AND_RETURN(counter, ret) do {                     \
    uint32_t code = BPF_TETHER_ERR_ ## counter;                 \
    uint32_t *count = bpf_tether_error_map_lookup_elem(&code);  \
    if (count) __sync_fetch_and_add(count, 1);                  \
    return ret;                                                 \
} while(0)

#define TC_DROP(counter) COUNT_AND_RETURN(counter, TC_ACT_SHOT)
#define TC_PUNT(counter) COUNT_AND_RETURN(counter, TC_ACT_PIPE)

#define XDP_DROP(counter) COUNT_AND_RETURN(counter, XDP_DROP)
#define XDP_PUNT(counter) COUNT_AND_RETURN(counter, XDP_PASS)

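// For example, TC_PUNT(LOW_TTL) increments tether_error_map[BPF_TETHER_ERR_LOW_TTL] and then
// returns TC_ACT_PIPE, i.e. it hands the packet back to the regular kernel network stack.
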
// ----- Tethering Data Stats and Limits -----

// Tethering stats, indexed by upstream interface.
DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, TetherStatsKey, TetherStatsValue, 16, TETHERING_GID)

// Tethering data limit, indexed by upstream interface.
// (tethering allowed when stats[iif].rxBytes + stats[iif].txBytes < limit[iif])
DEFINE_BPF_MAP_GRW(tether_limit_map, HASH, TetherLimitKey, TetherLimitValue, 16, TETHERING_GID)

// ----- IPv6 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Value, 64,
                   TETHERING_GID)

DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherDownstream64Value,
                   1024, TETHERING_GID)

DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                   TETHERING_GID)

static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream, const unsigned kver) {
    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;

    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Since the program never writes via DPA (direct packet access), auto-pull/unclone logic does
    // not trigger and thus we need to manually make sure we can read packet headers via DPA.
    // Note: this is a blind best effort pull, which may fail or pull less - this doesn't matter.
    // It has to be done early because it will invalidate any skb->data/data_end derived pointers.
    try_make_writable(skb, l2_header_size + IP6_HLEN + TCP_HLEN);

    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct ipv6hdr* ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_PIPE;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_PIPE;

    // IP version must be 6
    if (ip6->version != 6) TC_PUNT(INVALID_IPV6_VERSION);

    // Cannot decrement during forward if already zero or would be zero;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip6->hop_limit <= 1) TC_PUNT(LOW_TTL);

    // If hardware offload is running and programming flows based on conntrack entries,
    // try not to interfere with it.
    if (ip6->nexthdr == IPPROTO_TCP) {
        struct tcphdr* tcph = (void*)(ip6 + 1);

        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip6) + sizeof(*tcph) > data_end)
            TC_PUNT(INVALID_TCP_HEADER);

        // Do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) TC_PUNT(TCPV6_CONTROL_PACKET);
    }

    // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
    __be32 src32 = ip6->saddr.s6_addr32[0];
    if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. XLAT464 WKP
        (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        TC_PUNT(NON_GLOBAL_SRC);

    // Protect against forwarding packets destined to ::1 or fe80::/64 or other weirdness.
    __be32 dst32 = ip6->daddr.s6_addr32[0];
    if (dst32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. XLAT464 WKP
        (dst32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        TC_PUNT(NON_GLOBAL_DST);

    // In the upstream direction do not forward traffic within the same /64 subnet.
    if (!downstream && (src32 == dst32) && (ip6->saddr.s6_addr32[1] == ip6->daddr.s6_addr32[1]))
        TC_PUNT(LOCAL_SRC_DST);

    TetherDownstream6Key kd = {
            .iif = skb->ifindex,
            .neigh6 = ip6->daddr,
    };

    TetherUpstream6Key ku = {
            .iif = skb->ifindex,
    };
    if (is_ethernet) __builtin_memcpy(downstream ? kd.dstMac : ku.dstMac, eth->h_dest, ETH_ALEN);

    Tether6Value* v = downstream ? bpf_tether_downstream6_map_lookup_elem(&kd)
                                 : bpf_tether_upstream6_map_lookup_elem(&ku);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_PIPE;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) TC_PUNT(NO_STATS_ENTRY);

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) TC_PUNT(NO_LIMIT_ENTRY);

    // The required IPv6 minimum MTU is 1280; below that it's not clear what we should do, abort...
    if (v->pmtu < IPV6_MIN_MTU) TC_PUNT(BELOW_IPV6_MTU);

    // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
    uint64_t packets = 1;
    uint64_t L3_bytes = skb->len - l2_header_size;
    if (L3_bytes > v->pmtu) {
        const int tcp6_overhead = sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp6_overhead;
        const uint64_t payload = L3_bytes - tcp6_overhead;
        packets = (payload + mss - 1) / mss;
        L3_bytes = tcp6_overhead * packets + payload;
    }
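    // Purely illustrative numbers, not taken from the code above: with v->pmtu == 1500,
    // tcp6_overhead is 40 + 20 + 12 == 72 and mss is 1428, so an aggregated packet with
    // L3_bytes == 4356 (72 + 3 * 1428) is accounted as packets == 3 and
    // L3_bytes == 3 * 72 + 4284 == 4500, i.e. as three wire-sized packets.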

    // Are we past the limit?  If so, then abort...
    // Note: will not overflow since u64 is 936 years even at 5Gbps.
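    // (back of the envelope: 2^64 bytes / 5 Gbps ~ 1.8e19 / 6.25e8 bytes per sec ~ 3e10 sec ~ 936 years)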
    // Do not drop here.  Offload is just that: whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + L3_bytes > *limit_v) TC_PUNT(LIMIT_REACHED);

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip the extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_PUNT(CHANGE_HEAD_FAILED);
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip6 = (void*)(eth + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip6) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_DROP(TOO_SHORT);
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface. ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // CHECKSUM_COMPLETE is a 16-bit one's complement sum,
    // thus corrections for it need to be done in 16-bit chunks at even offsets.
    // IPv6 nexthdr is at offset 6, while hop limit is at offset 7
    uint8_t old_hl = ip6->hop_limit;
    --ip6->hop_limit;
    uint8_t new_hl = ip6->hop_limit;

    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.
    bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));

    __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, L3_bytes);

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

DEFINE_BPF_PROG("schedcls/tether_downstream6_ether", TETHERING_UID, TETHERING_GID,
                sched_cls_tether_downstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ true, KVER_NONE);
}

DEFINE_BPF_PROG("schedcls/tether_upstream6_ether", TETHERING_UID, TETHERING_GID,
                sched_cls_tether_upstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ false, KVER_NONE);
}

// Note: section names must be unique to prevent programs from appending to each other,
// so instead the bpf loader will strip everything past the final $ symbol when actually
// pinning the program into the filesystem.
//
// bpf_skb_change_head() is only present on 4.14+ and 2 trivial kernel patches are needed:
//   ANDROID: net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head
//   ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
// (the first of those has already been upstreamed)
//
// These were added to 4.14+ Android Common Kernel in R (including the original release of ACK 5.4)
// and there is a test in kernel/tests/net/test/bpf_test.py testSkbChangeHead()
// and in system/netd/tests/binder_test.cpp NetdBinderTest TetherOffloadForwarding.
//
// Hence, these mandatory (must load successfully) implementations for 4.14+ kernels:
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$4_14", TETHERING_UID, TETHERING_GID,
                     sched_cls_tether_downstream6_rawip_4_14, KVER(4, 14, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true, KVER(4, 14, 0));
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$4_14", TETHERING_UID, TETHERING_GID,
                     sched_cls_tether_upstream6_rawip_4_14, KVER(4, 14, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false, KVER(4, 14, 0));
}

// and define no-op stubs for pre-4.14 kernels.
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER(4, 14, 0))
(struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER(4, 14, 0))
(struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

// ----- IPv4 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 1024, TETHERING_GID)

DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 1024, TETHERING_GID)

static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
        const int l2_header_size, void* data, const void* data_end,
        struct ethhdr* eth, struct iphdr* ip, const bool is_ethernet,
        const bool downstream, const bool updatetime, const bool is_tcp,
        const unsigned kver) {
    struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
    struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);

    if (is_tcp) {
        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end)
            TC_PUNT(SHORT_TCP_HEADER);

        // If hardware offload is running and programming flows based on conntrack entries, try not
        // to interfere with it, so do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) TC_PUNT(TCPV4_CONTROL_PACKET);
    } else { // UDP
        // Make sure we can get at the udp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end)
            TC_PUNT(SHORT_UDP_HEADER);

        // Skip handling of CHECKSUM_COMPLETE packets with udp checksum zero due to need for
        // additional updating of skb->csum (this could be fixed up manually with more effort).
        //
        // Note that the in-kernel implementation of 'int64_t bpf_csum_update(skb, u32 csum)' is:
        //   if (skb->ip_summed == CHECKSUM_COMPLETE)
        //     return (skb->csum = csum_add(skb->csum, csum));
        //   else
        //     return -ENOTSUPP;
        //
        // So this will punt any CHECKSUM_COMPLETE packet with a zero UDP checksum,
        // and leave all other packets unaffected (since it just at most adds zero to skb->csum).
        //
        // In practice this should almost never trigger because most nics do not generate
        // CHECKSUM_COMPLETE packets on receive - especially so for nics/drivers on a phone.
        //
        // Additionally since we're forwarding, in most cases the value of the skb->csum field
        // shouldn't matter (it's not used by physical nic egress).
        //
        // It only matters if we're ingressing through a CHECKSUM_COMPLETE capable nic
        // and egressing through a virtual interface looping back to the kernel itself
        // (ie. something like veth) where the CHECKSUM_COMPLETE/skb->csum can get reused
        // on ingress.
        //
        // If we were in the kernel we'd probably simply call
        //   void skb_checksum_complete_unset(struct sk_buff *skb) {
        //     if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE;
        //   }
        // here instead. Perhaps there should be a bpf helper for that?
        if (!udph->check && (bpf_csum_update(skb, 0) >= 0)) TC_PUNT(UDP_CSUM_ZERO);
    }

    Tether4Key k = {
            .iif = skb->ifindex,
            .l4Proto = ip->protocol,
            .src4.s_addr = ip->saddr,
            .dst4.s_addr = ip->daddr,
            .srcPort = is_tcp ? tcph->source : udph->source,
            .dstPort = is_tcp ? tcph->dest : udph->dest,
    };
    if (is_ethernet) __builtin_memcpy(k.dstMac, eth->h_dest, ETH_ALEN);

    Tether4Value* v = downstream ? bpf_tether_downstream4_map_lookup_elem(&k)
                                 : bpf_tether_upstream4_map_lookup_elem(&k);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_PIPE;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) TC_PUNT(NO_STATS_ENTRY);

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) TC_PUNT(NO_LIMIT_ENTRY);

    // The required IPv4 minimum MTU is 68; below that it's not clear what we should do, abort...
    if (v->pmtu < 68) TC_PUNT(BELOW_IPV4_MTU);

    // Approximate handling of TCP/IPv4 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
    uint64_t packets = 1;
    uint64_t L3_bytes = skb->len - l2_header_size;
    if (L3_bytes > v->pmtu) {
        const int tcp4_overhead = sizeof(struct iphdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp4_overhead;
        const uint64_t payload = L3_bytes - tcp4_overhead;
        packets = (payload + mss - 1) / mss;
        L3_bytes = tcp4_overhead * packets + payload;
    }

    // Are we past the limit?  If so, then abort...
    // Note: will not overflow since u64 is 936 years even at 5Gbps.
    // Do not drop here.  Offload is just that: whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + L3_bytes > *limit_v) TC_PUNT(LIMIT_REACHED);

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip the extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_PUNT(CHANGE_HEAD_FAILED);
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip = (void*)(eth + 1);
        tcph = is_tcp ? (void*)(ip + 1) : NULL;
        udph = is_tcp ? NULL : (void*)(ip + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip) + (is_tcp ? sizeof(*tcph) : sizeof(*udph)) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            TC_DROP(TOO_SHORT);
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface. ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    // Decrement the IPv4 TTL; we already know it's greater than 1.
    // u8 TTL field is followed by u8 protocol to make a u16 for ipv4 header checksum update.
    // Since we're keeping the ipv4 checksum valid (which means the checksum of the entire
    // ipv4 header remains 0), the overall checksum of the entire packet does not change.
    const int sz2 = sizeof(__be16);
    const __be16 old_ttl_proto = *(__be16 *)&ip->ttl;
    const __be16 new_ttl_proto = old_ttl_proto - htons(0x0100);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_ttl_proto, new_ttl_proto, sz2);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(ttl), &new_ttl_proto, sz2, 0);
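    // (illustrative example, not a value read from any real packet: with ttl == 64 and
    //  protocol == IPPROTO_TCP == 6, the 16-bit ttl/protocol word goes from 0x4006 to 0x3f06
    //  in network byte order - exactly a decrement by htons(0x0100))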

    const int l4_offs_csum = is_tcp ? ETH_IP4_TCP_OFFSET(check) : ETH_IP4_UDP_OFFSET(check);
    const int sz4 = sizeof(__be32);
    // UDP 0 is special and stored as FFFF (this flag also causes a csum of 0 to be unmodified)
    const int l4_flags = is_tcp ? 0 : BPF_F_MARK_MANGLED_0;
    const __be32 old_daddr = k.dst4.s_addr;
    const __be32 old_saddr = k.src4.s_addr;
    const __be32 new_daddr = v->dst46.s6_addr32[3];
    const __be32 new_saddr = v->src46.s6_addr32[3];

    bpf_l4_csum_replace(skb, l4_offs_csum, old_daddr, new_daddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_daddr, new_daddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(daddr), &new_daddr, sz4, 0);

    bpf_l4_csum_replace(skb, l4_offs_csum, old_saddr, new_saddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_saddr, new_saddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(saddr), &new_saddr, sz4, 0);

    // The offsets for TCP and UDP ports: source (u16 @ L4 offset 0) & dest (u16 @ L4 offset 2) are
    // actually the same, so the compiler should just optimize them both down to a constant.
    bpf_l4_csum_replace(skb, l4_offs_csum, k.srcPort, v->srcPort, sz2 | l4_flags);
    bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(source) : ETH_IP4_UDP_OFFSET(source),
                        &v->srcPort, sz2, 0);

    bpf_l4_csum_replace(skb, l4_offs_csum, k.dstPort, v->dstPort, sz2 | l4_flags);
    bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(dest) : ETH_IP4_UDP_OFFSET(dest),
                        &v->dstPort, sz2, 0);

    // This requires the bpf_ktime_get_boot_ns() helper which was added in 5.8,
    // and backported to all Android Common Kernel 4.14+ trees.
    if (updatetime) v->last_used = bpf_ktime_get_boot_ns();

    __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, L3_bytes);

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
        const bool downstream, const bool updatetime, const unsigned kver) {
    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;

    // Must be meta-ethernet IPv4 frame
    if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_PIPE;

    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Since the program never writes via DPA (direct packet access), auto-pull/unclone logic does
    // not trigger and thus we need to manually make sure we can read packet headers via DPA.
    // Note: this is a blind best effort pull, which may fail or pull less - this doesn't matter.
    // It has to be done early because it will invalidate any skb->data/data_end derived pointers.
    try_make_writable(skb, l2_header_size + IP4_HLEN + TCP_HLEN);

    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct iphdr* ip = is_ethernet ? (void*)(eth + 1) : data;

    // Must have (ethernet and) ipv4 header
    if (data + l2_header_size + sizeof(*ip) > data_end) return TC_ACT_PIPE;

    // Ethertype - if present - must be IPv4
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IP))) return TC_ACT_PIPE;

    // IP version must be 4
    if (ip->version != 4) TC_PUNT(INVALID_IPV4_VERSION);

    // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
    if (ip->ihl != 5) TC_PUNT(HAS_IP_OPTIONS);

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(*ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip->version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
    if (sum4 != 0xFFFF) TC_PUNT(CHECKSUM);
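    // (worked example rather than real traffic: a raw 32-bit sum of 0x2fffd folds to
    //  0xfffd + 0x2 == 0xffff, i.e. a valid header; any other final value is punted above)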

    // Minimum IPv4 total length is the size of the header
    if (ntohs(ip->tot_len) < sizeof(*ip)) TC_PUNT(TRUNCATED_IPV4);

    // We are incapable of dealing with IPv4 fragments
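    // (i.e. punt whenever any frag_off bit other than DF is set: MF, a non-zero offset,
    //  or the reserved bit)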
    if (ip->frag_off & ~htons(IP_DF)) TC_PUNT(IS_IP_FRAG);

    // Cannot decrement during forward if already zero or would be zero;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip->ttl <= 1) TC_PUNT(LOW_TTL);

    // If we cannot update the 'last_used' field due to lack of the bpf_ktime_get_boot_ns() helper,
    // then it is not safe to offload UDP due to the small conntrack timeouts; in that situation
    // we can only support TCP.  This also has the added nice benefit of
    // using a separate error counter, and thus making it obvious which version of the program
    // is loaded.
    if (!updatetime && ip->protocol != IPPROTO_TCP) TC_PUNT(NON_TCP);

    // We do not support offloading anything besides IPv4 TCP and UDP, due to need for NAT,
    // but no need to check this if !updatetime due to check immediately above.
    if (updatetime && (ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
        TC_PUNT(NON_TCP_UDP);

    // We want to make sure that the compiler will, in the !updatetime case, entirely optimize
    // out all the non-tcp logic.  Also note that at this point is_udp === !is_tcp.
    const bool is_tcp = !updatetime || (ip->protocol == IPPROTO_TCP);

    // This is a bit of a hack to make things easier on the bpf verifier.
    // (In particular I believe the Linux 4.14 kernel's verifier can get confused later on about
    //  what offsets into the packet are valid and can spuriously reject the program, this is
    //  because it fails to realize that is_tcp && !is_tcp is impossible)
    //
    // For both TCP & UDP we'll need to read and modify the src/dst ports, which so happen to
    // always be in the first 4 bytes of the L4 header.  Additionally for UDP we'll need access
    // to the checksum field which is in bytes 7 and 8.  While for TCP we'll need to read the
    // TCP flags (at offset 13) and access to the checksum field (2 bytes at offset 16).
    // As such we *always* need access to at least 8 bytes.
    if (data + l2_header_size + sizeof(*ip) + 8 > data_end) TC_PUNT(SHORT_L4_HEADER);

    // We're forcing the compiler to emit two copies of the following code, optimized
    // separately for is_tcp being true or false.  This simplifies the resulting bpf
    // byte code sufficiently that the 4.14 bpf verifier is able to keep track of things.
    // Without this, the (updatetime == true) case would fail to bpf verify on 4.14 even
    // if the underlying requisite kernel support (bpf_ktime_get_boot_ns) was backported.
    if (is_tcp) {
        return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
                                  is_ethernet, downstream, updatetime, /* is_tcp */ true, kver);
    } else {
        return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
                                  is_ethernet, downstream, updatetime, /* is_tcp */ false, kver);
    }
}

// Full featured (required) implementations for 5.8+ kernels (these are S+ by definition)

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_8", TETHERING_UID, TETHERING_GID,
                     sched_cls_tether_downstream4_rawip_5_8, KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ true, KVER(5, 8, 0));
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_8", TETHERING_UID, TETHERING_GID,
                     sched_cls_tether_upstream4_rawip_5_8, KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ true, KVER(5, 8, 0));
}

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_8", TETHERING_UID, TETHERING_GID,
                     sched_cls_tether_downstream4_ether_5_8, KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true, /* updatetime */ true, KVER(5, 8, 0));
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", TETHERING_UID, TETHERING_GID,
                     sched_cls_tether_upstream4_ether_5_8, KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false, /* updatetime */ true, KVER(5, 8, 0));
}

// Full featured (optional) implementations for 4.14-S, 4.19-S & 5.4-S kernels
// (optional, because we need to be able to fallback for 4.14/4.19/5.4 pre-S kernels)

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$opt",
                                    TETHERING_UID, TETHERING_GID,
                                    sched_cls_tether_downstream4_rawip_opt,
                                    KVER(4, 14, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ true, KVER(4, 14, 0));
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$opt",
                                    TETHERING_UID, TETHERING_GID,
                                    sched_cls_tether_upstream4_rawip_opt,
                                    KVER(4, 14, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ true, KVER(4, 14, 0));
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$opt",
                                    TETHERING_UID, TETHERING_GID,
                                    sched_cls_tether_downstream4_ether_opt,
                                    KVER(4, 14, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true, /* updatetime */ true, KVER(4, 14, 0));
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
                                    TETHERING_UID, TETHERING_GID,
                                    sched_cls_tether_upstream4_ether_opt,
                                    KVER(4, 14, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false, /* updatetime */ true, KVER(4, 14, 0));
}

// Partial (TCP-only: will not update 'last_used' field) implementations for 4.14+ kernels.
// These will be loaded only if the above optional ones failed (loading of *these* must succeed
// for 5.4+, since that is always an R patched kernel).
//
// [Note: as a result TCP connections will not have their conntrack timeout refreshed, however,
// since /proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established defaults to 432000 (seconds),
// this in practice means they'll break only after 5 days.  This seems an acceptable trade-off.
//
// Additionally the kernel/tests change "net-test: add bpf_ktime_get_ns / bpf_ktime_get_boot_ns tests",
// which enforces and documents the required kernel cherrypicks, will make it pretty unlikely that
// many devices upgrading to S will end up relying on these fallback programs.

// RAWIP: Required for 5.4-R kernels -- which always support bpf_skb_change_head().

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$5_4", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_downstream4_rawip_5_4, KVER(5, 4, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ false, KVER(5, 4, 0));
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$5_4", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_upstream4_rawip_5_4, KVER(5, 4, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ false, KVER(5, 4, 0));
}

// RAWIP: Optional for 4.14/4.19 (R) kernels -- which support bpf_skb_change_head().
// [Note: fallback for 4.14/4.19 (P/Q) kernels is below in stub section]

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$4_14",
                                    TETHERING_UID, TETHERING_GID,
                                    sched_cls_tether_downstream4_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true, /* updatetime */ false, KVER(4, 14, 0));
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$4_14",
                                    TETHERING_UID, TETHERING_GID,
                                    sched_cls_tether_upstream4_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false, /* updatetime */ false, KVER(4, 14, 0));
}

// ETHER: Required for 4.14-Q/R, 4.19-Q/R & 5.4-R kernels.

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$4_14", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_downstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true, /* updatetime */ false, KVER(4, 14, 0));
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_upstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false, /* updatetime */ false, KVER(4, 14, 0));
}

// Placeholder (no-op) implementations for older Q kernels

// RAWIP: 4.9-P/Q, 4.14-P/Q & 4.19-Q kernels -- without bpf_skb_change_head() for tc programs

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

// ETHER: 4.9-P/Q kernel

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
(struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", TETHERING_UID, TETHERING_GID,
                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
(struct __sk_buff* skb) {
    return TC_ACT_PIPE;
}

// ----- XDP Support -----

DEFINE_BPF_MAP_GRW(tether_dev_map, DEVMAP_HASH, uint32_t, uint32_t, 64, TETHERING_GID)

static inline __always_inline int do_xdp_forward6(struct xdp_md *ctx, const bool is_ethernet,
        const bool downstream) {
    return XDP_PASS;
}

static inline __always_inline int do_xdp_forward4(struct xdp_md *ctx, const bool is_ethernet,
        const bool downstream) {
    return XDP_PASS;
}

static inline __always_inline int do_xdp_forward_ether(struct xdp_md *ctx, const bool downstream) {
    const void* data = (void*)(long)ctx->data;
    const void* data_end = (void*)(long)ctx->data_end;
    const struct ethhdr* eth = data;

    // Make sure we actually have an ethernet header
    if ((void*)(eth + 1) > data_end) return XDP_PASS;

    if (eth->h_proto == htons(ETH_P_IPV6))
        return do_xdp_forward6(ctx, /* is_ethernet */ true, downstream);
    if (eth->h_proto == htons(ETH_P_IP))
        return do_xdp_forward4(ctx, /* is_ethernet */ true, downstream);

    // Anything else we don't know how to handle...
    return XDP_PASS;
}

static inline __always_inline int do_xdp_forward_rawip(struct xdp_md *ctx, const bool downstream) {
    const void* data = (void*)(long)ctx->data;
    const void* data_end = (void*)(long)ctx->data_end;

    // The top nibble of both IPv4 and IPv6 headers is the IP version.
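    // (purely as an example: a minimal IPv4 header begins with byte 0x45, giving 4,
    //  while any IPv6 header begins with 0x60..0x6f, giving 6)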
    if (data_end - data < 1) return XDP_PASS;
    const uint8_t v = (*(uint8_t*)data) >> 4;

    if (v == 6) return do_xdp_forward6(ctx, /* is_ethernet */ false, downstream);
    if (v == 4) return do_xdp_forward4(ctx, /* is_ethernet */ false, downstream);

    // Anything else we don't know how to handle...
    return XDP_PASS;
}

#define DEFINE_XDP_PROG(str, func) \
    DEFINE_BPF_PROG_KVER(str, TETHERING_UID, TETHERING_GID, func, KVER(5, 9, 0))(struct xdp_md *ctx)

DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                xdp_tether_downstream_ether) {
    return do_xdp_forward_ether(ctx, /* downstream */ true);
}

DEFINE_XDP_PROG("xdp/tether_downstream_rawip",
                xdp_tether_downstream_rawip) {
    return do_xdp_forward_rawip(ctx, /* downstream */ true);
}

DEFINE_XDP_PROG("xdp/tether_upstream_ether",
                xdp_tether_upstream_ether) {
    return do_xdp_forward_ether(ctx, /* downstream */ false);
}

DEFINE_XDP_PROG("xdp/tether_upstream_rawip",
                xdp_tether_upstream_rawip) {
    return do_xdp_forward_rawip(ctx, /* downstream */ false);
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity (Tethering)");