/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/if.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>

// bionic's kernel uapi linux/udp.h header is munged: it declares struct __kernel_udphdr
// instead of struct udphdr, so alias the name back before including it.
#define __kernel_udphdr udphdr
#include <linux/udp.h>

#include "bpf_helpers.h"
#include "bpf_net_helpers.h"
#include "netdbpf/bpf_shared.h"

// From kernel:include/net/ip.h
#define IP_DF 0x4000  // Flag: "Don't Fragment"

// ----- Helper macros for offsets to fields -----

// They all assume simple IP packets:
//   - no VLAN ethernet tags
//   - no IPv4 options (see IP4_HLEN/IP4_TCP_OFFSET/IP4_UDP_OFFSET)
//   - no IPv6 extension headers
//   - no TCP options (see TCP_HLEN)

//#define ETH_HLEN sizeof(struct ethhdr)
#define IP4_HLEN sizeof(struct iphdr)
#define IP6_HLEN sizeof(struct ipv6hdr)
#define TCP_HLEN sizeof(struct tcphdr)
#define UDP_HLEN sizeof(struct udphdr)

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP4_TCP_OFFSET(field) (IP4_HLEN + TCP_OFFSET(field))
#define IP4_UDP_OFFSET(field) (IP4_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L3 (IPv6) header
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)
#define IP6_TCP_OFFSET(field) (IP6_HLEN + TCP_OFFSET(field))
#define IP6_UDP_OFFSET(field) (IP6_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L2 (ie. Ethernet) header (which must be present)
#define ETH_IP4_OFFSET(field) (ETH_HLEN + IP4_OFFSET(field))
#define ETH_IP4_TCP_OFFSET(field) (ETH_HLEN + IP4_TCP_OFFSET(field))
#define ETH_IP4_UDP_OFFSET(field) (ETH_HLEN + IP4_UDP_OFFSET(field))
#define ETH_IP6_OFFSET(field) (ETH_HLEN + IP6_OFFSET(field))
#define ETH_IP6_TCP_OFFSET(field) (ETH_HLEN + IP6_TCP_OFFSET(field))
#define ETH_IP6_UDP_OFFSET(field) (ETH_HLEN + IP6_UDP_OFFSET(field))
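// For instance (illustrative arithmetic): ETH_IP4_TCP_OFFSET(check) expands to
// ETH_HLEN (14) + IP4_HLEN (20) + offsetof(struct tcphdr, check) (16) == 50, the byte
// offset of the TCP checksum within an ethernet framed IPv4/TCP packet.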

// ----- Tethering stats and data limits -----

// Tethering stats, indexed by upstream interface.
DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, TetherStatsKey, TetherStatsValue, 16, AID_NETWORK_STACK)

// Tethering data limit, indexed by upstream interface.
// (tethering allowed when stats[iif].rxBytes + stats[iif].txBytes < limit[iif])
DEFINE_BPF_MAP_GRW(tether_limit_map, HASH, TetherLimitKey, TetherLimitValue, 16, AID_NETWORK_STACK)

// ----- IPv6 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherDownstream64Value,
                   64, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct ipv6hdr* ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_OK;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_OK;

    // IP version must be 6
    if (ip6->version != 6) return TC_ACT_OK;

    // Cannot decrement the hop limit during forward if doing so would make it zero or less;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip6->hop_limit <= 1) return TC_ACT_OK;

    // If hardware offload is running and programming flows based on conntrack entries,
    // try not to interfere with it.
    if (ip6->nexthdr == IPPROTO_TCP) {
        struct tcphdr* tcph = (void*)(ip6 + 1);

        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip6) + sizeof(*tcph) > data_end) return TC_ACT_OK;

        // Do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) return TC_ACT_OK;
    }

    // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
    __be32 src32 = ip6->saddr.s6_addr32[0];
    if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b:/32 incl. XLAT464 WKP
        (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        return TC_ACT_OK;

    // Protect against forwarding packets destined to ::1 or fe80::/64 or other weirdness.
    __be32 dst32 = ip6->daddr.s6_addr32[0];
    if (dst32 != htonl(0x0064ff9b) &&                        // 64:ff9b:/32 incl. XLAT464 WKP
        (dst32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        return TC_ACT_OK;

    // In the upstream direction do not forward traffic within the same /64 subnet.
    if (!downstream && (src32 == dst32) && (ip6->saddr.s6_addr32[1] == ip6->daddr.s6_addr32[1]))
        return TC_ACT_OK;
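    // (Illustrative: 2001:db8:1:2::a and 2001:db8:1:2::b share their first 64 bits, so in
    // the upstream direction such same-subnet traffic is left to the core stack.)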

    TetherDownstream6Key kd = {
            .iif = skb->ifindex,
            .neigh6 = ip6->daddr,
    };

    TetherUpstream6Key ku = {
            .iif = skb->ifindex,
    };

    Tether6Value* v = downstream ? bpf_tether_downstream6_map_lookup_elem(&kd)
                                 : bpf_tether_upstream6_map_lookup_elem(&ku);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) return TC_ACT_OK;

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) return TC_ACT_OK;

    // The required IPv6 minimum mtu is 1280; below that it is not clear what we should do, abort...
    if (v->pmtu < IPV6_MIN_MTU) return TC_ACT_OK;

    // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: the default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in the tcp header)
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > v->pmtu) {
        const int tcp_overhead = sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }
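    // (Worked example with illustrative numbers: a 4000 byte GRO'ed TCP/IPv6 skb with a pmtu
    // of 1500 gives tcp_overhead = 40 + 20 + 12 = 72, mss = 1428, payload = 3928, hence
    // packets = ceil(3928 / 1428) = 3 and bytes = 3 * 72 + 3928 = 4144.)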

    // Are we past the limit? If so, then abort...
    // Note: will not overflow since u64 is 936 years even at 5Gbps.
    // Do not drop here. Offload is just that, whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) return TC_ACT_OK;
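    // (Checking that arithmetic: 2^64 bytes at 5Gbps = 2^64 / 625e6 seconds, roughly
    // 2.95e10 seconds, which is indeed about 936 years.)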

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if the TX interface is RAWIP and thus does not need an ethernet
        // header, because this is easier and the kernel will strip the extraneous header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_OK;
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip6 = (void*)(eth + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip6) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_SHOT;
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface. ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // CHECKSUM_COMPLETE is a 16-bit one's complement sum,
    // thus corrections for it need to be done in 16-bit chunks at even offsets.
    // IPv6 nexthdr is at offset 6, while hop limit is at offset 7
    uint8_t old_hl = ip6->hop_limit;
    --ip6->hop_limit;
    uint8_t new_hl = ip6->hop_limit;

    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.
    bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));
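    // (Sketch of why this expression works: it is the RFC 1624 incremental update
    // HC' = HC + ~m + m', with ~m computed as 0xFFFF - m; ntohs() places the hop limit
    // byte in the half of the 16-bit nexthdr/hop_limit word that it occupies when that
    // word at offset 6 is loaded natively and summed.)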
236
Maciej Żenczykowskibca0c852021-01-19 01:22:17 -0800237 __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
238 __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);
Hungming Chen56c632c2020-09-10 15:42:58 +0800239
240 // Overwrite any mac header with the new one
Maciej Żenczykowskibca0c852021-01-19 01:22:17 -0800241 // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
Maciej Żenczykowski7dfbcf52021-01-26 16:08:57 -0800242 *eth = v->macHeader;
Hungming Chen56c632c2020-09-10 15:42:58 +0800243
244 // Redirect to forwarded interface.
245 //
246 // Note that bpf_redirect() cannot fail unless you pass invalid flags.
247 // The redirect actually happens after the ebpf program has already terminated,
248 // and can fail for example for mtu reasons at that point in time, but there's nothing
249 // we can do about it here.
Maciej Żenczykowski7dfbcf52021-01-26 16:08:57 -0800250 return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
Hungming Chen56c632c2020-09-10 15:42:58 +0800251}

DEFINE_BPF_PROG("schedcls/tether_downstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_downstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ true);
}

DEFINE_BPF_PROG("schedcls/tether_upstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_upstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ false);
}

// Note: section names must be unique to prevent programs from appending to each other,
// so instead the bpf loader will strip everything past the final $ symbol when actually
// pinning the program into the filesystem.
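// (For example, the '$5_4', '$4_14' and '$stub' variants of each rawip program below all
// pin to the same stripped name, and the first one to load successfully wins.)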
//
// bpf_skb_change_head() is only present on 4.14+ and 2 trivial kernel patches are needed:
//   ANDROID: net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head
//   ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
// (the first of those has already been upstreamed)
//
// 5.4 kernel support was only added to Android Common Kernel in R,
// and thus a 5.4 kernel always supports this.
//
// Hence, these mandatory (must load successfully) implementations for 5.4+ kernels:
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream6_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream6_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false);
}

// and these identical optional (may fail to load) implementations for [4.14..5.4) patched kernels:
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_downstream6_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_upstream6_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false);
}

// and define no-op stubs for [4.9,4.14) and unpatched [4.14,5.4) kernels.
// (if the above real 4.14+ program loaded successfully, then bpfloader will have already pinned
// it at the same location this one would be pinned at and will thus skip loading this stub)
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

// ----- IPv4 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)

static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct iphdr* ip = is_ethernet ? (void*)(eth + 1) : data;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_OK;

    // Must be meta-ethernet IPv4 frame
    if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_OK;

    // Must have (ethernet and) ipv4 header
    if (data + l2_header_size + sizeof(*ip) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv4
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IP))) return TC_ACT_OK;

    // IP version must be 4
    if (ip->version != 4) return TC_ACT_OK;

    // We cannot handle IP options, just the standard 20 byte == 5 dword minimal IPv4 header
    if (ip->ihl != 5) return TC_ACT_OK;

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(*ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip->version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
    if (sum4 != 0xFFFF) return TC_ACT_OK;
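    // (Illustrative arithmetic: if the ten 16-bit header words sum to 0x4FFFB, folding gives
    // 0xFFFB + 0x4 = 0xFFFF, one's complement negative zero, ie. a valid header checksum.)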

    // Minimum IPv4 total length is the size of the header
    if (ntohs(ip->tot_len) < sizeof(*ip)) return TC_ACT_OK;

    // We are incapable of dealing with IPv4 fragments
    if (ip->frag_off & ~htons(IP_DF)) return TC_ACT_OK;

    // Cannot decrement the TTL during forward if doing so would make it zero or less;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip->ttl <= 1) return TC_ACT_OK;

    const bool is_tcp = (ip->protocol == IPPROTO_TCP);

    // We do not support anything besides TCP and UDP
    if (!is_tcp && (ip->protocol != IPPROTO_UDP)) return TC_ACT_OK;

    struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
    struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);

    if (is_tcp) {
        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end) return TC_ACT_OK;

        // If hardware offload is running and programming flows based on conntrack entries, try not
        // to interfere with it, so do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) return TC_ACT_OK;
    } else {  // UDP
        // Make sure we can get at the udp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end) return TC_ACT_OK;
    }

    Tether4Key k = {
            .iif = skb->ifindex,
            .l4Proto = ip->protocol,
            .src4.s_addr = ip->saddr,
            .dst4.s_addr = ip->daddr,
            .srcPort = is_tcp ? tcph->source : udph->source,
            .dstPort = is_tcp ? tcph->dest : udph->dest,
    };
    if (is_ethernet) for (int i = 0; i < ETH_ALEN; ++i) k.dstMac[i] = eth->h_dest[i];

    Tether4Value* v = downstream ? bpf_tether_downstream4_map_lookup_elem(&k)
                                 : bpf_tether_upstream4_map_lookup_elem(&k);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) return TC_ACT_OK;

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) return TC_ACT_OK;

    // The required IPv4 minimum mtu is 68; below that it is not clear what we should do, abort...
    if (v->pmtu < 68) return TC_ACT_OK;

    // Approximate handling of TCP/IPv4 overhead for incoming LRO/GRO packets: the default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in the tcp header)
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > v->pmtu) {
        const int tcp_overhead = sizeof(struct iphdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }

    // Are we past the limit? If so, then abort...
    // Note: will not overflow since u64 is 936 years even at 5Gbps.
    // Do not drop here. Offload is just that, whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) return TC_ACT_OK;

    // HACK: the rewrites below only touch TCP header offsets, so punt UDP to the core stack.
    if (!is_tcp) return TC_ACT_OK;

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if the TX interface is RAWIP and thus does not need an ethernet
        // header, because this is easier and the kernel will strip the extraneous header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_OK;
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip = (void*)(eth + 1);
        tcph = is_tcp ? (void*)(ip + 1) : NULL;
        udph = is_tcp ? NULL : (void*)(ip + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip) +
                    (is_tcp ? sizeof(*tcph) : sizeof(*udph)) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_SHOT;
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface. ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // Overwrite any mac header with the new one.
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    const int sz4 = sizeof(__be32);
    const __be32 old_daddr = k.dst4.s_addr;
    const __be32 old_saddr = k.src4.s_addr;
    const __be32 new_daddr = v->dst46.s6_addr32[3];
    const __be32 new_saddr = v->src46.s6_addr32[3];

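    // The TCP checksum also covers an IPv4 pseudo header containing both addresses, so each
    // address rewrite must fix up the L4 checksum (flagged BPF_F_PSEUDO_HDR) as well as the
    // L3 header checksum.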
    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), old_daddr, new_daddr,
                        sz4 | BPF_F_PSEUDO_HDR);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_daddr, new_daddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(daddr), &new_daddr, sz4, 0);

    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), old_saddr, new_saddr,
                        sz4 | BPF_F_PSEUDO_HDR);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_saddr, new_saddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(saddr), &new_saddr, sz4, 0);

    const int sz2 = sizeof(__be16);
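    // Ports are covered by the TCP checksum proper rather than the pseudo header, hence no
    // BPF_F_PSEUDO_HDR here, and the IPv4 header checksum does not cover them at all.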
    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), k.srcPort, v->srcPort, sz2);
    bpf_skb_store_bytes(skb, ETH_IP4_TCP_OFFSET(source), &v->srcPort, sz2, 0);

    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), k.dstPort, v->dstPort, sz2);
    bpf_skb_store_bytes(skb, ETH_IP4_TCP_OFFSET(dest), &v->dstPort, sz2, 0);

    // TODO: decrement the TTL (and make the matching IPv4 header checksum fixup)

    // TODO: v->last_used = bpf_ktime_get_boot_ns();

    __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

// Real implementations for 5.9+ kernels

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_ether_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_rawip_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_ether_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_rawip_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false);
}

// Placeholder implementations for older pre-5.9 kernels

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

// ----- XDP Support -----

#define DEFINE_XDP_PROG(str, func) \
    DEFINE_BPF_PROG_KVER(str, AID_ROOT, AID_NETWORK_STACK, func, KVER(5, 9, 0))(struct xdp_md *ctx)

DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                xdp_tether_downstream_ether) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_downstream_rawip",
                xdp_tether_downstream_rawip) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_upstream_ether",
                xdp_tether_upstream_ether) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_upstream_rawip",
                xdp_tether_upstream_rawip) {
    return XDP_PASS;
}

LICENSE("Apache 2.0");
CRITICAL("netd");