/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
// bionic kernel uapi linux/udp.h header is munged...
#define __kernel_udphdr udphdr
#include <linux/udp.h>
#include <stdbool.h>
#include <stdint.h>

#include "bpf_helpers.h"

// IP flags. (from kernel's include/net/ip.h)
#define IP_CE      0x8000  // Flag: "Congestion" (really reserved 'evil bit')
#define IP_DF      0x4000  // Flag: "Don't Fragment"
#define IP_MF      0x2000  // Flag: "More Fragments"
#define IP_OFFSET  0x1FFF  // "Fragment Offset" part

// IPv6 fragmentation header. (from kernel's include/net/ipv6.h)
struct frag_hdr {
    __u8 nexthdr;
    __u8 reserved;          // always zero
    __be16 frag_off;        // 13 bit offset, 2 bits zero, 1 bit "More Fragments"
    __be32 identification;
};

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4/IPv6) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)

// this returns 0 iff skb->sk is NULL
static uint64_t (*bpf_get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static uint64_t (*bpf_get_sk_cookie)(struct bpf_sock* sk) = (void*)BPF_FUNC_get_socket_cookie;

static uint32_t (*bpf_get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
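
// Illustrative sketch (not part of the original header): since the cookie is
// 0 iff the skb has no associated socket, bpf_get_socket_cookie() doubles as
// an "is there a known local socket?" test.
static inline __always_inline __unused bool example_skb_has_socket(struct __sk_buff* skb) {
    return bpf_get_socket_cookie(skb) != 0;
}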

static int (*bpf_skb_pull_data)(struct __sk_buff* skb, __u32 len) = (void*)BPF_FUNC_skb_pull_data;

static int (*bpf_skb_load_bytes)(const struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;

static int (*bpf_skb_load_bytes_relative)(const struct __sk_buff* skb, int off, void* to, int len,
                                          int start_hdr) = (void*)BPF_FUNC_skb_load_bytes_relative;
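
// Illustrative sketch (not part of the original header): loading relative to
// the network header via BPF_HDR_START_NET works whether or not the program
// sees an L2 (ethernet) header, e.g. fetching the IPv4 protocol byte:
static inline __always_inline __unused int example_load_ipv4_protocol(const struct __sk_buff* skb,
                                                                      __u8* proto) {
    return bpf_skb_load_bytes_relative(skb, IP4_OFFSET(protocol), proto, sizeof(*proto),
                                       BPF_HDR_START_NET);
}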

static int (*bpf_skb_store_bytes)(struct __sk_buff* skb, __u32 offset, const void* from, __u32 len,
                                  __u64 flags) = (void*)BPF_FUNC_skb_store_bytes;

static int64_t (*bpf_csum_diff)(__be32* from, __u32 from_size, __be32* to, __u32 to_size,
                                __wsum seed) = (void*)BPF_FUNC_csum_diff;

static int64_t (*bpf_csum_update)(struct __sk_buff* skb, __wsum csum) = (void*)BPF_FUNC_csum_update;

static int (*bpf_skb_change_proto)(struct __sk_buff* skb, __be16 proto,
                                   __u64 flags) = (void*)BPF_FUNC_skb_change_proto;
static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l4_csum_replace;
static int (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void*)BPF_FUNC_redirect;
static int (*bpf_redirect_map)(const struct bpf_map_def* map, __u32 key,
                               __u64 flags) = (void*)BPF_FUNC_redirect_map;
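
// Illustrative sketch (not part of the original header): the usual pattern for
// rewriting a 2-byte L4 field, here the TCP destination port at 'l4_off' bytes
// into the packet -- first fix up the checksum (the low 4 bits of 'flags'
// carry the size of the changed field), then store the new value.
static inline __always_inline __unused int example_rewrite_tcp_dport(struct __sk_buff* skb,
        __u32 l4_off, __be16 old_port, __be16 new_port) {
    int rv = bpf_l4_csum_replace(skb, l4_off + TCP_OFFSET(check), old_port, new_port,
                                 sizeof(new_port));
    if (rv) return rv;
    return bpf_skb_store_bytes(skb, l4_off + TCP_OFFSET(dest), &new_port, sizeof(new_port), 0);
}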

static int (*bpf_skb_change_head)(struct __sk_buff* skb, __u32 head_room,
                                  __u64 flags) = (void*)BPF_FUNC_skb_change_head;
static int (*bpf_skb_adjust_room)(struct __sk_buff* skb, __s32 len_diff, __u32 mode,
                                  __u64 flags) = (void*)BPF_FUNC_skb_adjust_room;

// Android only supports little endian architectures
#define htons(x) (__builtin_constant_p(x) ? ___constant_swab16(x) : __builtin_bswap16(x))
#define htonl(x) (__builtin_constant_p(x) ? ___constant_swab32(x) : __builtin_bswap32(x))
#define ntohs(x) htons(x)
#define ntohl(x) htonl(x)
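
// Illustrative sketches (not part of the original header): the standard
// fragment tests expressed with the constants above.  An IPv4 packet is a
// fragment iff 'more fragments' is set or the fragment offset is non-zero;
// an IPv6 frag_hdr describes a non-first fragment iff its 13-bit offset
// (the top 13 bits of frag_off) is non-zero.
static inline __always_inline __unused bool is_ipv4_fragment(const struct iphdr* ip) {
    return ip->frag_off & htons(IP_MF | IP_OFFSET);
}

static inline __always_inline __unused bool is_ipv6_non_first_fragment(const struct frag_hdr* fh) {
    return fh->frag_off & htons(0xFFF8);
}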

static inline __always_inline __unused bool is_received_skb(struct __sk_buff* skb) {
    return skb->pkt_type == PACKET_HOST || skb->pkt_type == PACKET_BROADCAST ||
           skb->pkt_type == PACKET_MULTICAST;
}

// try to make the first 'len' header bytes readable/writable via direct packet access
// (note: AFAIK there is no way to ask for only direct packet read without also getting write)
static inline __always_inline void try_make_writable(struct __sk_buff* skb, unsigned len) {
    if (len > skb->len) len = skb->len;
    if (skb->data_end - skb->data < len) bpf_skb_pull_data(skb, len);
}
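
// Illustrative sketch (not part of the original header): even after calling
// try_make_writable() the verifier still requires an explicit bounds check
// against data/data_end before any direct packet access, e.g. for an
// ethernet + IPv4 header:
static inline __always_inline __unused bool example_ipv4_hdr_writable(struct __sk_buff* skb) {
    try_make_writable(skb, ETH_HLEN + sizeof(struct iphdr));
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    return data + ETH_HLEN + sizeof(struct iphdr) <= data_end;
}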

// Single-field wrapper structs instead of raw bools: the named field makes
// call sites self-documenting, and the distinct struct types let the compiler
// reject accidentally swapped or implicitly converted arguments.
struct egress_bool { bool egress; };
#define INGRESS ((struct egress_bool){ .egress = false })
#define EGRESS ((struct egress_bool){ .egress = true })

struct stream_bool { bool down; };
#define UPSTREAM ((struct stream_bool){ .down = false })
#define DOWNSTREAM ((struct stream_bool){ .down = true })

struct rawip_bool { bool rawip; };
#define ETHER ((struct rawip_bool){ .rawip = false })
#define RAWIP ((struct rawip_bool){ .rawip = true })

struct updatetime_bool { bool updatetime; };
#define NO_UPDATETIME ((struct updatetime_bool){ .updatetime = false })
#define UPDATETIME ((struct updatetime_bool){ .updatetime = true })

// Return value for xt_bpf (netfilter match extension) programs
static const int XTBPF_NOMATCH = 0;
static const int XTBPF_MATCH = 1;
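
// Illustrative sketch (hypothetical program, not in the original header): an
// xt_bpf match returns XTBPF_MATCH for packets the rule should match, e.g.
// matching everything except loopback traffic (ifindex 1 is conventionally
// the loopback device on Linux):
static inline __always_inline __unused int example_xtbpf_match_nonloopback(struct __sk_buff* skb) {
    return (skb->ifindex == 1) ? XTBPF_NOMATCH : XTBPF_MATCH;
}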

// Return values for bpf programs that make an allow/deny decision
// (e.g. cgroup hooks): 0 denies, 1 allows.
static const int BPF_DISALLOW = 0;
static const int BPF_ALLOW = 1;