/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/if.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>

// bionic kernel uapi linux/udp.h header is munged...
#define __kernel_udphdr udphdr
#include <linux/udp.h>

#include "bpf_helpers.h"
#include "bpf_net_helpers.h"
#include "bpf_tethering.h"
#include "netdbpf/bpf_shared.h"

// From kernel:include/net/ip.h
#define IP_DF 0x4000  // Flag: "Don't Fragment"

// ----- Helper functions for offsets to fields -----

// They all assume simple IP packets:
// - no VLAN ethernet tags
// - no IPv4 options (see IP4_HLEN/IP4_TCP_OFFSET/IP4_UDP_OFFSET)
// - no IPv6 extension headers
// - no TCP options (see TCP_HLEN)

//#define ETH_HLEN sizeof(struct ethhdr)
#define IP4_HLEN sizeof(struct iphdr)
#define IP6_HLEN sizeof(struct ipv6hdr)
#define TCP_HLEN sizeof(struct tcphdr)
#define UDP_HLEN sizeof(struct udphdr)

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP4_TCP_OFFSET(field) (IP4_HLEN + TCP_OFFSET(field))
#define IP4_UDP_OFFSET(field) (IP4_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L3 (IPv6) header
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)
#define IP6_TCP_OFFSET(field) (IP6_HLEN + TCP_OFFSET(field))
#define IP6_UDP_OFFSET(field) (IP6_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L2 (ie. Ethernet) header (which must be present)
#define ETH_IP4_OFFSET(field) (ETH_HLEN + IP4_OFFSET(field))
#define ETH_IP4_TCP_OFFSET(field) (ETH_HLEN + IP4_TCP_OFFSET(field))
#define ETH_IP4_UDP_OFFSET(field) (ETH_HLEN + IP4_UDP_OFFSET(field))
#define ETH_IP6_OFFSET(field) (ETH_HLEN + IP6_OFFSET(field))
#define ETH_IP6_TCP_OFFSET(field) (ETH_HLEN + IP6_TCP_OFFSET(field))
#define ETH_IP6_UDP_OFFSET(field) (ETH_HLEN + IP6_UDP_OFFSET(field))
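
// As a concrete worked example of how these compose (assuming the no-options, no-extension-header
// layout described above): ETH_HLEN is 14, IP4_HLEN is 20 and offsetof(struct tcphdr, check) is
// 16, so ETH_IP4_TCP_OFFSET(check) evaluates to 14 + 20 + 16 = 50, ie. the byte offset of the
// TCP checksum within the full ethernet frame, which is what the IPv4 forwarding code below
// passes to bpf_l4_csum_replace() and bpf_skb_store_bytes().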

// ----- Tethering stats and data limits -----

// Tethering stats, indexed by upstream interface.
DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, TetherStatsKey, TetherStatsValue, 16, AID_NETWORK_STACK)

// Tethering data limit, indexed by upstream interface.
// (tethering allowed when stats[iif].rxBytes + stats[iif].txBytes < limit[iif])
DEFINE_BPF_MAP_GRW(tether_limit_map, HASH, TetherLimitKey, TetherLimitValue, 16, AID_NETWORK_STACK)

// ----- IPv6 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherDownstream64Value,
                   64, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_error_map, ARRAY, __u32, __u32, BPF_TETHER_ERR__MAX,
                   AID_NETWORK_STACK)

#define COUNT_AND_RETURN(counter, ret) do {                     \
    __u32 code = BPF_TETHER_ERR_ ## counter;                    \
    __u32 *count = bpf_tether_error_map_lookup_elem(&code);     \
    if (count) __sync_fetch_and_add(count, 1);                  \
    return ret;                                                 \
} while(0)

#define DROP(counter) COUNT_AND_RETURN(counter, TC_ACT_SHOT)
#define PUNT(counter) COUNT_AND_RETURN(counter, TC_ACT_OK)

static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct ipv6hdr* ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_OK;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_OK;

    // IP version must be 6
    if (ip6->version != 6) PUNT(INVALID_IP_VERSION);

    // Cannot decrement during forward if already zero or would be zero;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip6->hop_limit <= 1) PUNT(LOW_TTL);

    // If hardware offload is running and programming flows based on conntrack entries,
    // try not to interfere with it.
    if (ip6->nexthdr == IPPROTO_TCP) {
        struct tcphdr* tcph = (void*)(ip6 + 1);

        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip6) + sizeof(*tcph) > data_end)
            PUNT(INVALID_TCP_HEADER);

        // Do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) PUNT(TCP_CONTROL_PACKET);
    }

    // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
    __be32 src32 = ip6->saddr.s6_addr32[0];
    if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. XLAT464 WKP
        (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        PUNT(NON_GLOBAL_SRC);

    // Protect against forwarding packets destined to ::1 or fe80::/64 or other weirdness.
    __be32 dst32 = ip6->daddr.s6_addr32[0];
    if (dst32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. XLAT464 WKP
        (dst32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        PUNT(NON_GLOBAL_DST);

    // In the upstream direction do not forward traffic within the same /64 subnet.
    if (!downstream && (src32 == dst32) && (ip6->saddr.s6_addr32[1] == ip6->daddr.s6_addr32[1]))
        PUNT(LOCAL_SRC_DST);

    TetherDownstream6Key kd = {
            .iif = skb->ifindex,
            .neigh6 = ip6->daddr,
    };

    TetherUpstream6Key ku = {
            .iif = skb->ifindex,
    };

    Tether6Value* v = downstream ? bpf_tether_downstream6_map_lookup_elem(&kd)
                                 : bpf_tether_upstream6_map_lookup_elem(&ku);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) PUNT(NO_STATS_ENTRY);

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) PUNT(NO_LIMIT_ENTRY);

    // Required IPv6 minimum mtu is 1280; below that it's not clear what we should do, so abort...
    if (v->pmtu < IPV6_MIN_MTU) PUNT(BELOW_IPV6_MTU);

    // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > v->pmtu) {
        const int tcp_overhead = sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }
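    // Worked example of the estimate above (illustrative numbers only, not from the original
    // comments): with v->pmtu == 1500, a 4412 byte GRO aggregate gives tcp_overhead == 72
    // (40 + 20 + 12), mss == 1428 and payload == 4340, so packets == 4 and
    // bytes == 4 * 72 + 4340 == 4628, roughly what four on-the-wire segments would have cost.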

    // Are we past the limit?  If so, then abort...
    // Note: this will not overflow, since at 5Gbps it would take about 936 years to wrap a
    // u64 byte counter.
    // Do not drop here.  Offload is just that, whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) PUNT(LIMIT_REACHED);

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip the extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            PUNT(CHANGE_HEAD_FAILED);
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip6 = (void*)(eth + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip6) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            DROP(TOO_SHORT);
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface.  ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // CHECKSUM_COMPLETE is a 16-bit one's complement sum,
    // thus corrections for it need to be done in 16-bit chunks at even offsets.
    // IPv6 nexthdr is at offset 6, while hop limit is at offset 7
    uint8_t old_hl = ip6->hop_limit;
    --ip6->hop_limit;
    uint8_t new_hl = ip6->hop_limit;

    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.
    bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));

    __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

DEFINE_BPF_PROG("schedcls/tether_downstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_downstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ true);
}

DEFINE_BPF_PROG("schedcls/tether_upstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_upstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ false);
}

// Note: section names must be unique to prevent programs from appending to each other,
// so instead the bpf loader will strip everything past the final $ symbol when actually
// pinning the program into the filesystem.
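// (Illustration: the three "schedcls/tether_downstream6_rawip$..." variants below all reduce
// to "schedcls/tether_downstream6_rawip" once everything after the final $ is stripped, so
// they pin at the same location and at most one of them ends up installed on any given kernel.)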
//
// bpf_skb_change_head() is only present on 4.14+ and 2 trivial kernel patches are needed:
//   ANDROID: net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head
//   ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
// (the first of those has already been upstreamed)
//
// 5.4 kernel support was only added to Android Common Kernel in R,
// and thus a 5.4 kernel always supports this.
//
// Hence, these mandatory (must load successfully) implementations for 5.4+ kernels:
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream6_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream6_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false);
}

// and these identical optional (may fail to load) implementations for [4.14..5.4) patched kernels:
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_downstream6_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_upstream6_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false);
}

// and define no-op stubs for [4.9,4.14) and unpatched [4.14,5.4) kernels.
// (if the above real 4.14+ program loaded successfully, then bpfloader will have already pinned
//  it at the same location this one would be pinned at and will thus skip loading this stub)
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

// ----- IPv4 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)

static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct iphdr* ip = is_ethernet ? (void*)(eth + 1) : data;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_OK;

    // Must be meta-ethernet IPv4 frame
    if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_OK;

    // Must have (ethernet and) ipv4 header
    if (data + l2_header_size + sizeof(*ip) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv4
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IP))) return TC_ACT_OK;

    // IP version must be 4
    if (ip->version != 4) return TC_ACT_OK;

    // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
    if (ip->ihl != 5) return TC_ACT_OK;

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(*ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip->version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
    if (sum4 != 0xFFFF) return TC_ACT_OK;
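    // (Why folding twice suffices: sum4 starts out at most 10 * 0xFFFF == 0x9FFF6, the first
    // fold therefore leaves at most 0xFFFF + 0x9 == 0x10008, and the second fold of any such
    // value always fits in 16 bits.  This worked bound is an editorial addition, not part of
    // the original comments.)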

    // Minimum IPv4 total length is the size of the header
    if (ntohs(ip->tot_len) < sizeof(*ip)) return TC_ACT_OK;

    // We are incapable of dealing with IPv4 fragments
    if (ip->frag_off & ~htons(IP_DF)) return TC_ACT_OK;

    // Cannot decrement during forward if already zero or would be zero;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip->ttl <= 1) return TC_ACT_OK;

    const bool is_tcp = (ip->protocol == IPPROTO_TCP);

    // We do not support anything besides TCP and UDP
    if (!is_tcp && (ip->protocol != IPPROTO_UDP)) return TC_ACT_OK;

    struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
    struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);

    if (is_tcp) {
        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end) return TC_ACT_OK;

        // If hardware offload is running and programming flows based on conntrack entries, try not
        // to interfere with it, so do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) return TC_ACT_OK;
    } else {  // UDP
        // Make sure we can get at the udp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end) return TC_ACT_OK;
    }

    Tether4Key k = {
            .iif = skb->ifindex,
            .l4Proto = ip->protocol,
            .src4.s_addr = ip->saddr,
            .dst4.s_addr = ip->daddr,
            .srcPort = is_tcp ? tcph->source : udph->source,
            .dstPort = is_tcp ? tcph->dest : udph->dest,
    };
    if (is_ethernet) for (int i = 0; i < ETH_ALEN; ++i) k.dstMac[i] = eth->h_dest[i];

    Tether4Value* v = downstream ? bpf_tether_downstream4_map_lookup_elem(&k)
                                 : bpf_tether_upstream4_map_lookup_elem(&k);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) return TC_ACT_OK;

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) return TC_ACT_OK;

    // Required IPv4 minimum mtu is 68; below that it's not clear what we should do, so abort...
    if (v->pmtu < 68) return TC_ACT_OK;

    // Approximate handling of TCP/IPv4 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > v->pmtu) {
        const int tcp_overhead = sizeof(struct iphdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }

    // Are we past the limit?  If so, then abort...
    // Note: this will not overflow, since at 5Gbps it would take about 936 years to wrap a
    // u64 byte counter.
    // Do not drop here.  Offload is just that, whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) return TC_ACT_OK;

    if (!is_tcp) return TC_ACT_OK;  // HACK: the rewrites below only handle TCP offsets/checksums

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip the extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_OK;
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip = (void*)(eth + 1);
        tcph = is_tcp ? (void*)(ip + 1) : NULL;
        udph = is_tcp ? NULL : (void*)(ip + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip) +
            (is_tcp ? sizeof(*tcph) : sizeof(*udph)) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_SHOT;
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface.  ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    const int sz4 = sizeof(__be32);
    const __be32 old_daddr = k.dst4.s_addr;
    const __be32 old_saddr = k.src4.s_addr;
    const __be32 new_daddr = v->dst46.s6_addr32[3];
    const __be32 new_saddr = v->src46.s6_addr32[3];

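    // A brief note on the checksum updates below: the address rewrites are folded into the L4
    // checksum with BPF_F_PSEUDO_HDR because the TCP checksum covers a pseudo-header that
    // includes the IP addresses, while the port rewrites further down only affect the L4
    // header itself.  Only the address rewrites modify the IPv4 header, hence only they also
    // need bpf_l3_csum_replace() against the IPv4 header checksum.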
    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), old_daddr, new_daddr, sz4 | BPF_F_PSEUDO_HDR);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_daddr, new_daddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(daddr), &new_daddr, sz4, 0);

    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), old_saddr, new_saddr, sz4 | BPF_F_PSEUDO_HDR);
    bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_saddr, new_saddr, sz4);
    bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(saddr), &new_saddr, sz4, 0);

    const int sz2 = sizeof(__be16);
    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), k.srcPort, v->srcPort, sz2);
    bpf_skb_store_bytes(skb, ETH_IP4_TCP_OFFSET(source), &v->srcPort, sz2, 0);

    bpf_l4_csum_replace(skb, ETH_IP4_TCP_OFFSET(check), k.dstPort, v->dstPort, sz2);
    bpf_skb_store_bytes(skb, ETH_IP4_TCP_OFFSET(dest), &v->dstPort, sz2, 0);

    // TODO: decrement the TTL (and update the IPv4 header checksum to match).

    // v->last_used = bpf_ktime_get_boot_ns();

    __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

// Real implementations for 5.9+ kernels

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_ether_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_rawip_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_ether_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_rawip_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false);
}

// Placeholder implementations for older pre-5.9 kernels

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

// ----- XDP Support -----

#define DEFINE_XDP_PROG(str, func) \
    DEFINE_BPF_PROG_KVER(str, AID_ROOT, AID_NETWORK_STACK, func, KVER(5, 9, 0))(struct xdp_md *ctx)

DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                xdp_tether_downstream_ether) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_downstream_rawip",
                xdp_tether_downstream_rawip) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_upstream_ether",
                xdp_tether_upstream_ether) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_upstream_rawip",
                xdp_tether_upstream_rawip) {
    return XDP_PASS;
}

LICENSE("Apache 2.0");
CRITICAL("netd");