| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2019 The Android Open Source Project | 
|  | 3 | * | 
|  | 4 | * Licensed under the Apache License, Version 2.0 (the "License"); | 
|  | 5 | * you may not use this file except in compliance with the License. | 
|  | 6 | * You may obtain a copy of the License at | 
|  | 7 | * | 
|  | 8 | *      http://www.apache.org/licenses/LICENSE-2.0 | 
|  | 9 | * | 
|  | 10 | * Unless required by applicable law or agreed to in writing, software | 
|  | 11 | * distributed under the License is distributed on an "AS IS" BASIS, | 
|  | 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
|  | 13 | * See the License for the specific language governing permissions and | 
|  | 14 | * limitations under the License. | 
|  | 15 | */ | 
|  | 16 |  | 
|  | 17 | #include <linux/bpf.h> | 
|  | 18 | #include <linux/if.h> | 
|  | 19 | #include <linux/if_ether.h> | 
|  | 20 | #include <linux/in.h> | 
|  | 21 | #include <linux/in6.h> | 
|  | 22 | #include <linux/ip.h> | 
|  | 23 | #include <linux/ipv6.h> | 
|  | 24 | #include <linux/pkt_cls.h> | 
|  | 25 | #include <linux/swab.h> | 
|  | 26 | #include <stdbool.h> | 
|  | 27 | #include <stdint.h> | 
|  | 28 |  | 
|  | 29 | // bionic kernel uapi linux/udp.h header is munged... | 
|  | 30 | #define __kernel_udphdr udphdr | 
|  | 31 | #include <linux/udp.h> | 
|  | 32 |  | 
| Maciej Żenczykowski | f769952 | 2022-05-24 15:56:03 -0700 | [diff] [blame] | 33 | // The resulting .o needs to load on the Android T beta 3 bpfloader | 
|  | 34 | #define BPFLOADER_MIN_VER BPFLOADER_T_BETA3_VERSION | 
| Maciej Żenczykowski | acebffb | 2022-05-16 16:05:15 -0700 | [diff] [blame] | 35 |  | 
| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 36 | #include "bpf_helpers.h" | 
|  | 37 | #include "bpf_net_helpers.h" | 
| Maciej Żenczykowski | 7b452a1 | 2022-12-08 13:10:29 +0000 | [diff] [blame] | 38 | #include "clatd.h" | 
| Maciej Żenczykowski | ce9108f | 2022-06-15 02:02:21 -0700 | [diff] [blame] | 39 | #include "clat_mark.h" | 
| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 40 |  | 
| Maciej Żenczykowski | e0ddc47 | 2022-10-24 02:43:21 -0700 | [diff] [blame] | 41 | // IP flags. (from kernel's include/net/ip.h) | 
|  | 42 | #define IP_CE      0x8000  // Flag: "Congestion" (really reserved 'evil bit') | 
|  | 43 | #define IP_DF      0x4000  // Flag: "Don't Fragment" | 
|  | 44 | #define IP_MF      0x2000  // Flag: "More Fragments" | 
|  | 45 | #define IP_OFFSET  0x1FFF  // "Fragment Offset" part | 
|  | 46 |  | 
// IPv6 Fragment extension header layout, copied from kernel's include/net/ipv6.h
// (it is not exported via uapi headers, hence redefined here).
struct frag_hdr {
    __u8   nexthdr;
    __u8   reserved;        // always zero
    __be16 frag_off;        // 13 bit offset, 2 bits zero, 1 bit "More Fragments"
    __be32 identification;
};
| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 54 |  | 
// Ingress translation state, owned/written by userspace (AID_SYSTEM).
// Looked up by nat64() below with key {iif, pfx96, local6}; the value supplies
// the translated IPv4 destination (v->local4) and an optional ifindex to
// redirect the translated packet to (v->oif).
DEFINE_BPF_MAP_GRW(clat_ingress6_map, HASH, ClatIngress6Key, ClatIngress6Value, 16, AID_SYSTEM)
|  | 56 |  | 
// Translates an ingress IPv6 clat packet into IPv4 in place (RFC 6145 style
// stateless translation), for both ethernet and rawip devices.
//
// skb          - the packet being processed at TC ingress
// is_ethernet  - whether the device has an L2 ethernet header
//
// Returns TC_ACT_PIPE to let the untranslated packet continue up the stack
// (optionally with skb->mark = CLAT_MARK so ip6tables can drop the duplicate
// that clatd would otherwise also translate), TC_ACT_SHOT on unrecoverable
// mid-mutation failure, or the result of bpf_redirect() on success.
//
// NOTE: statement order here is verifier-sensitive (bounds checks before every
// direct packet access, pointer reloads after every helper that may reallocate
// the skb) — be careful when modifying.
static inline __always_inline int nat64(struct __sk_buff* skb, bool is_ethernet) {
    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;

    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Not clear if this is actually necessary considering we use DPA (Direct Packet Access),
    // but we need to make sure we can read the IPv6 header reliably so that we can set
    // skb->mark = 0xDeadC1a7 for packets we fail to offload.
    try_make_writable(skb, l2_header_size + sizeof(struct ipv6hdr));

    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    const struct ethhdr* const eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    const struct ipv6hdr* const ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_PIPE;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_PIPE;

    // IP version must be 6
    if (ip6->version != 6) return TC_ACT_PIPE;

    // Maximum IPv6 payload length that can be translated to IPv4
    // (the IPv4 tot_len field is 16 bits and includes the 20-byte IPv4 header).
    if (ntohs(ip6->payload_len) > 0xFFFF - sizeof(struct iphdr)) return TC_ACT_PIPE;

    // Lookup key: ingress interface + the /96 NAT64 prefix carved out of the
    // source address + the full destination (our local clat) IPv6 address.
    ClatIngress6Key k = {
        .iif = skb->ifindex,
        .pfx96.in6_u.u6_addr32 =
            {
                ip6->saddr.in6_u.u6_addr32[0],
                ip6->saddr.in6_u.u6_addr32[1],
                ip6->saddr.in6_u.u6_addr32[2],
            },
        .local6 = ip6->daddr,
    };

    ClatIngress6Value* v = bpf_clat_ingress6_map_lookup_elem(&k);

    // No translation state configured for this flow - not a clat packet for us.
    if (!v) return TC_ACT_PIPE;

    __u8 proto = ip6->nexthdr;
    __be16 ip_id = 0;                 // IPv4 identification field (network order)
    __be16 frag_off = htons(IP_DF);   // default: unfragmented, don't-fragment set
    __u16 tot_len = ntohs(ip6->payload_len) + sizeof(struct iphdr);  // cannot overflow, see above

    if (proto == IPPROTO_FRAGMENT) {
        // Must have (ethernet and) ipv6 header and ipv6 fragment extension header
        if (data + l2_header_size + sizeof(*ip6) + sizeof(struct frag_hdr) > data_end)
            return TC_ACT_PIPE;
        const struct frag_hdr *frag = (const struct frag_hdr *)(ip6 + 1);
        proto = frag->nexthdr;
        // RFC6145: use bottom 16-bits of network endian 32-bit IPv6 ID field for 16-bit IPv4 field.
        // this is equivalent to: ip_id = htons(ntohl(frag->identification));
        ip_id = frag->identification >> 16;
        // Conversion of 16-bit IPv6 frag offset to 16-bit IPv4 frag offset field.
        // IPv6 is '13 bits of offset in multiples of 8' + 2 zero bits + more fragment bit
        // IPv4 is zero bit + don't frag bit + more frag bit + '13 bits of offset in multiples of 8'
        frag_off = ntohs(frag->frag_off);
        frag_off = ((frag_off & 1) << 13) | (frag_off >> 3);
        frag_off = htons(frag_off);
        // Note that by construction tot_len is guaranteed to not underflow here
        // (it is at least sizeof(iphdr), and we subtract the 8-byte frag EH we strip).
        tot_len -= sizeof(struct frag_hdr);
        // This is a badly formed IPv6 packet with less payload than the size of an IPv6 Frag EH
        if (tot_len < sizeof(struct iphdr)) return TC_ACT_PIPE;
    }

    switch (proto) {
        case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
        case IPPROTO_UDP:  // address means there is no need to update their checksums.
        case IPPROTO_GRE:  // We do not need to bother looking at GRE/ESP headers,
        case IPPROTO_ESP:  // since there is never a checksum to update.
            break;

        default:  // do not know how to handle anything else
            // Mark ingress non-offloaded clat packet for dropping in ip6tables bw_raw_PREROUTING.
            // Non-offloaded clat packet is going to be handled by clat daemon and ip6tables. The
            // duplicate one in ip6tables is not necessary.
            skb->mark = CLAT_MARK;
            return TC_ACT_PIPE;
    }

    struct ethhdr eth2;  // used iff is_ethernet
    if (is_ethernet) {
        eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
        eth2.h_proto = htons(ETH_P_IP);  // But replace the ethertype
    }

    // Build the replacement IPv4 header on the stack; written into the packet
    // only after the skb mutation below succeeds.
    struct iphdr ip = {
        .version = 4,                                                      // u4
        .ihl = sizeof(struct iphdr) / sizeof(__u32),                       // u4
        .tos = (ip6->priority << 4) + (ip6->flow_lbl[0] >> 4),             // u8
        .tot_len = htons(tot_len),                                         // be16
        .id = ip_id,                                                       // be16
        .frag_off = frag_off,                                              // be16
        .ttl = ip6->hop_limit,                                             // u8
        .protocol = proto,                                                 // u8
        .check = 0,                                                        // u16
        .saddr = ip6->saddr.in6_u.u6_addr32[3],                            // be32
        .daddr = v->local4.s_addr,                                         // be32
    };

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)&ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip.version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    ip.check = (__u16)~sum4;                // sum4 cannot be zero, so this is never 0xFFFF

    // Calculate the *negative* IPv6 16-bit one's complement checksum of the IPv6 header.
    __wsum sum6 = 0;
    // We'll end up with a non-zero sum due to ip6->version == 6 (which has '0' bits)
    for (int i = 0; i < sizeof(*ip6) / sizeof(__u16); ++i) {
        sum6 += ~((__u16*)ip6)[i];  // note the bitwise negation
    }

    // Note that there is no L4 checksum update: we are relying on the checksum neutrality
    // of the ipv6 address chosen by netd's ClatdController.

    // Packet mutations begin - point of no return, but if this first modification fails
    // the packet is probably still pristine, so let clatd handle it.
    if (bpf_skb_change_proto(skb, htons(ETH_P_IP), 0)) {
        // Mark ingress non-offloaded clat packet for dropping in ip6tables bw_raw_PREROUTING.
        // Non-offloaded clat packet is going to be handled by clat daemon and ip6tables. The
        // duplicate one in ip6tables is not necessary.
        skb->mark = CLAT_MARK;
        return TC_ACT_PIPE;
    }

    // This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
    //
    // In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
    // thus we need to subtract out the ipv6 header's sum, and add in the ipv4 header's sum.
    // However, by construction of ip.check above the checksum of an ipv4 header is zero.
    // Thus we only need to subtract the ipv6 header's sum, which is the same as adding
    // in the sum of the bitwise negation of the ipv6 header.
    //
    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.  So we just ignore the return code.
    //
    // if (skb->ip_summed == CHECKSUM_COMPLETE)
    //   return (skb->csum = csum_add(skb->csum, csum));
    // else
    //   return -ENOTSUPP;
    bpf_csum_update(skb, sum6);

    if (frag_off != htons(IP_DF)) {
        // If we're converting an IPv6 Fragment, we need to trim off 8 more bytes
        // We're beyond recovery on error here... but hard to imagine how this could fail.
        if (bpf_skb_adjust_room(skb, -(__s32)sizeof(struct frag_hdr), BPF_ADJ_ROOM_NET, /*flags*/0))
            return TC_ACT_SHOT;
    }

    // bpf_skb_change_proto() invalidates all pointers - reload them.
    data = (void*)(long)skb->data;
    data_end = (void*)(long)skb->data_end;

    // I cannot think of any valid way for this error condition to trigger, however I do
    // believe the explicit check is required to keep the in kernel ebpf verifier happy.
    if (data + l2_header_size + sizeof(struct iphdr) > data_end) return TC_ACT_SHOT;

    if (is_ethernet) {
        struct ethhdr* new_eth = data;

        // Copy over the updated ethernet header
        *new_eth = eth2;

        // Copy over the new ipv4 header.
        *(struct iphdr*)(new_eth + 1) = ip;
    } else {
        // Copy over the new ipv4 header without an ethernet header.
        *(struct iphdr*)data = ip;
    }

    // Redirect, possibly back to same interface, so tcpdump sees packet twice.
    if (v->oif) return bpf_redirect(v->oif, BPF_F_INGRESS);

    // Just let it through, tcpdump will not see IPv4 packet.
    return TC_ACT_PIPE;
}
|  | 245 |  | 
// TC ingress entry point for ethernet devices (L2 header present).
DEFINE_BPF_PROG("schedcls/ingress6/clat_ether", AID_ROOT, AID_SYSTEM, sched_cls_ingress6_clat_ether)
(struct __sk_buff* skb) {
    return nat64(skb, /* is_ethernet */ true);
}
|  | 250 |  | 
// TC ingress entry point for rawip devices (no L2 header, e.g. cellular).
DEFINE_BPF_PROG("schedcls/ingress6/clat_rawip", AID_ROOT, AID_SYSTEM, sched_cls_ingress6_clat_rawip)
(struct __sk_buff* skb) {
    return nat64(skb, /* is_ethernet */ false);
}
|  | 255 |  | 
// Egress translation state, owned/written by userspace (AID_SYSTEM).
// Looked up by the egress4 program below with key {iif, local4}; the value
// supplies the IPv6 source (v->local6), the NAT64 /96 prefix for the
// destination (v->pfx96), and the output interface (v->oif, v->oifIsEthernet).
DEFINE_BPF_MAP_GRW(clat_egress4_map, HASH, ClatEgress4Key, ClatEgress4Value, 16, AID_SYSTEM)
|  | 257 |  | 
| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 258 | DEFINE_BPF_PROG("schedcls/egress4/clat_rawip", AID_ROOT, AID_SYSTEM, sched_cls_egress4_clat_rawip) | 
|  | 259 | (struct __sk_buff* skb) { | 
| Maciej Żenczykowski | 4b1e0f8 | 2022-04-07 16:53:02 -0700 | [diff] [blame] | 260 | // Must be meta-ethernet IPv4 frame | 
|  | 261 | if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_PIPE; | 
|  | 262 |  | 
|  | 263 | // Possibly not needed, but for consistency with nat64 up above | 
| Maciej Żenczykowski | 824fb29 | 2022-04-11 23:29:46 -0700 | [diff] [blame] | 264 | try_make_writable(skb, sizeof(struct iphdr)); | 
| Maciej Żenczykowski | 4b1e0f8 | 2022-04-07 16:53:02 -0700 | [diff] [blame] | 265 |  | 
| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 266 | void* data = (void*)(long)skb->data; | 
|  | 267 | const void* data_end = (void*)(long)skb->data_end; | 
|  | 268 | const struct iphdr* const ip4 = data; | 
|  | 269 |  | 
| Maciej Żenczykowski | 849b3aa | 2022-01-20 20:58:34 -0800 | [diff] [blame] | 270 | // Must have ipv4 header | 
|  | 271 | if (data + sizeof(*ip4) > data_end) return TC_ACT_PIPE; | 
|  | 272 |  | 
|  | 273 | // IP version must be 4 | 
|  | 274 | if (ip4->version != 4) return TC_ACT_PIPE; | 
|  | 275 |  | 
|  | 276 | // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header | 
|  | 277 | if (ip4->ihl != 5) return TC_ACT_PIPE; | 
|  | 278 |  | 
|  | 279 | // Calculate the IPv4 one's complement checksum of the IPv4 header. | 
|  | 280 | __wsum sum4 = 0; | 
|  | 281 | for (int i = 0; i < sizeof(*ip4) / sizeof(__u16); ++i) { | 
|  | 282 | sum4 += ((__u16*)ip4)[i]; | 
|  | 283 | } | 
|  | 284 | // Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4 | 
|  | 285 | sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE | 
|  | 286 | sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16 | 
|  | 287 | // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF | 
|  | 288 | if (sum4 != 0xFFFF) return TC_ACT_PIPE; | 
|  | 289 |  | 
|  | 290 | // Minimum IPv4 total length is the size of the header | 
|  | 291 | if (ntohs(ip4->tot_len) < sizeof(*ip4)) return TC_ACT_PIPE; | 
|  | 292 |  | 
|  | 293 | // We are incapable of dealing with IPv4 fragments | 
|  | 294 | if (ip4->frag_off & ~htons(IP_DF)) return TC_ACT_PIPE; | 
|  | 295 |  | 
|  | 296 | switch (ip4->protocol) { | 
|  | 297 | case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6 | 
|  | 298 | case IPPROTO_GRE:  // address means there is no need to update their checksums. | 
|  | 299 | case IPPROTO_ESP:  // We do not need to bother looking at GRE/ESP headers, | 
|  | 300 | break;         // since there is never a checksum to update. | 
|  | 301 |  | 
|  | 302 | case IPPROTO_UDP:  // See above comment, but must also have UDP header... | 
|  | 303 | if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end) return TC_ACT_PIPE; | 
|  | 304 | const struct udphdr* uh = (const struct udphdr*)(ip4 + 1); | 
|  | 305 | // If IPv4/UDP checksum is 0 then fallback to clatd so it can calculate the | 
|  | 306 | // checksum.  Otherwise the network or more likely the NAT64 gateway might | 
|  | 307 | // drop the packet because in most cases IPv6/UDP packets with a zero checksum | 
|  | 308 | // are invalid. See RFC 6935.  TODO: calculate checksum via bpf_csum_diff() | 
|  | 309 | if (!uh->check) return TC_ACT_PIPE; | 
|  | 310 | break; | 
|  | 311 |  | 
|  | 312 | default:  // do not know how to handle anything else | 
|  | 313 | return TC_ACT_PIPE; | 
|  | 314 | } | 
|  | 315 |  | 
|  | 316 | ClatEgress4Key k = { | 
|  | 317 | .iif = skb->ifindex, | 
|  | 318 | .local4.s_addr = ip4->saddr, | 
|  | 319 | }; | 
|  | 320 |  | 
|  | 321 | ClatEgress4Value* v = bpf_clat_egress4_map_lookup_elem(&k); | 
|  | 322 |  | 
|  | 323 | if (!v) return TC_ACT_PIPE; | 
|  | 324 |  | 
|  | 325 | // Translating without redirecting doesn't make sense. | 
|  | 326 | if (!v->oif) return TC_ACT_PIPE; | 
|  | 327 |  | 
|  | 328 | // This implementation is currently limited to rawip. | 
|  | 329 | if (v->oifIsEthernet) return TC_ACT_PIPE; | 
|  | 330 |  | 
|  | 331 | struct ipv6hdr ip6 = { | 
|  | 332 | .version = 6,                                    // __u8:4 | 
|  | 333 | .priority = ip4->tos >> 4,                       // __u8:4 | 
|  | 334 | .flow_lbl = {(ip4->tos & 0xF) << 4, 0, 0},       // __u8[3] | 
|  | 335 | .payload_len = htons(ntohs(ip4->tot_len) - 20),  // __be16 | 
|  | 336 | .nexthdr = ip4->protocol,                        // __u8 | 
|  | 337 | .hop_limit = ip4->ttl,                           // __u8 | 
|  | 338 | .saddr = v->local6,                              // struct in6_addr | 
|  | 339 | .daddr = v->pfx96,                               // struct in6_addr | 
|  | 340 | }; | 
|  | 341 | ip6.daddr.in6_u.u6_addr32[3] = ip4->daddr; | 
|  | 342 |  | 
|  | 343 | // Calculate the IPv6 16-bit one's complement checksum of the IPv6 header. | 
|  | 344 | __wsum sum6 = 0; | 
|  | 345 | // We'll end up with a non-zero sum due to ip6.version == 6 | 
|  | 346 | for (int i = 0; i < sizeof(ip6) / sizeof(__u16); ++i) { | 
|  | 347 | sum6 += ((__u16*)&ip6)[i]; | 
|  | 348 | } | 
|  | 349 |  | 
|  | 350 | // Note that there is no L4 checksum update: we are relying on the checksum neutrality | 
|  | 351 | // of the ipv6 address chosen by netd's ClatdController. | 
|  | 352 |  | 
|  | 353 | // Packet mutations begin - point of no return, but if this first modification fails | 
|  | 354 | // the packet is probably still pristine, so let clatd handle it. | 
|  | 355 | if (bpf_skb_change_proto(skb, htons(ETH_P_IPV6), 0)) return TC_ACT_PIPE; | 
|  | 356 |  | 
|  | 357 | // This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet. | 
|  | 358 | // | 
|  | 359 | // In such a case, skb->csum is a 16-bit one's complement sum of the entire payload, | 
|  | 360 | // thus we need to subtract out the ipv4 header's sum, and add in the ipv6 header's sum. | 
|  | 361 | // However, we've already verified the ipv4 checksum is correct and thus 0. | 
|  | 362 | // Thus we only need to add the ipv6 header's sum. | 
|  | 363 | // | 
|  | 364 | // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error | 
|  | 365 | // (-ENOTSUPP) if it isn't.  So we just ignore the return code (see above for more details). | 
|  | 366 | bpf_csum_update(skb, sum6); | 
|  | 367 |  | 
|  | 368 | // bpf_skb_change_proto() invalidates all pointers - reload them. | 
|  | 369 | data = (void*)(long)skb->data; | 
|  | 370 | data_end = (void*)(long)skb->data_end; | 
|  | 371 |  | 
|  | 372 | // I cannot think of any valid way for this error condition to trigger, however I do | 
|  | 373 | // believe the explicit check is required to keep the in kernel ebpf verifier happy. | 
|  | 374 | if (data + sizeof(ip6) > data_end) return TC_ACT_SHOT; | 
|  | 375 |  | 
|  | 376 | // Copy over the new ipv6 header without an ethernet header. | 
|  | 377 | *(struct ipv6hdr*)data = ip6; | 
|  | 378 |  | 
|  | 379 | // Redirect to non v4-* interface.  Tcpdump only sees packet after this redirect. | 
|  | 380 | return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */); | 
|  | 381 | } | 
|  | 382 |  | 
|  | 383 | LICENSE("Apache 2.0"); | 
| Maciej Żenczykowski | c41e35d | 2022-08-04 13:58:46 +0000 | [diff] [blame] | 384 | CRITICAL("Connectivity"); |