/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
// bionic kernel uapi linux/udp.h header is munged...
#define __kernel_udphdr udphdr
#include <linux/udp.h>
#include <stdbool.h>
#include <stdint.h>

#include "bpf_helpers.h"

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4/IPv6) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)

// this returns 0 iff skb->sk is NULL
static uint64_t (*bpf_get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static uint64_t (*bpf_get_sk_cookie)(struct bpf_sock* sk) = (void*)BPF_FUNC_get_socket_cookie;
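// Illustrative note (not part of the original header): because a zero cookie means
// there is no associated socket (see comment above), callers typically bail out
// early on 0, e.g.:
//   uint64_t cookie = bpf_get_socket_cookie(skb);
//   if (!cookie) return TC_ACT_UNSPEC;  // hypothetical fall-through action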

static uint32_t (*bpf_get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;

static int (*bpf_skb_pull_data)(struct __sk_buff* skb, __u32 len) = (void*)BPF_FUNC_skb_pull_data;

static int (*bpf_skb_load_bytes)(const struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;

static int (*bpf_skb_load_bytes_relative)(const struct __sk_buff* skb, int off, void* to, int len,
                                          int start_hdr) = (void*)BPF_FUNC_skb_load_bytes_relative;
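
// Illustrative sketch (not part of the original header, function name is hypothetical):
// how the IP4_OFFSET() macro above combines with bpf_skb_load_bytes_relative().
// BPF_HDR_START_NET makes the offset relative to the network (L3) header, so no
// assumption about the size (or presence) of an L2 header is needed.
static inline __always_inline __unused int example_load_ipv4_protocol(const struct __sk_buff* skb,
                                                                       uint8_t* proto) {
    return bpf_skb_load_bytes_relative(skb, IP4_OFFSET(protocol), proto, sizeof(*proto),
                                       BPF_HDR_START_NET);
}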

static int (*bpf_skb_store_bytes)(struct __sk_buff* skb, __u32 offset, const void* from, __u32 len,
                                  __u64 flags) = (void*)BPF_FUNC_skb_store_bytes;

static int64_t (*bpf_csum_diff)(__be32* from, __u32 from_size, __be32* to, __u32 to_size,
                                __wsum seed) = (void*)BPF_FUNC_csum_diff;

static int64_t (*bpf_csum_update)(struct __sk_buff* skb, __wsum csum) = (void*)BPF_FUNC_csum_update;

static int (*bpf_skb_change_proto)(struct __sk_buff* skb, __be16 proto,
                                   __u64 flags) = (void*)BPF_FUNC_skb_change_proto;
static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l4_csum_replace;
static int (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void*)BPF_FUNC_redirect;
static int (*bpf_redirect_map)(const struct bpf_map_def* map, __u32 key,
                               __u64 flags) = (void*)BPF_FUNC_redirect_map;
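
// Illustrative sketch (not part of the original header, function name is hypothetical):
// the usual pattern for rewriting an IPv4 destination address with the checksum helpers
// above, assuming an untagged ethernet frame carrying TCP over an IPv4 header without
// options. Offsets are absolute from the start of the packet, as these helpers expect.
static inline __always_inline __unused int example_rewrite_ipv4_daddr(struct __sk_buff* skb,
                                                                      __be32 old_ip,
                                                                      __be32 new_ip) {
    const __u32 l3_off = ETH_HLEN;                       // 14-byte ethernet header
    const __u32 l4_off = l3_off + sizeof(struct iphdr);  // 20-byte IPv4 header (no options)
    // The TCP checksum covers the pseudo-header, hence BPF_F_PSEUDO_HDR.
    int rv = bpf_l4_csum_replace(skb, l4_off + TCP_OFFSET(check), old_ip, new_ip,
                                 BPF_F_PSEUDO_HDR | sizeof(new_ip));
    if (rv) return rv;
    rv = bpf_l3_csum_replace(skb, l3_off + IP4_OFFSET(check), old_ip, new_ip, sizeof(new_ip));
    if (rv) return rv;
    return bpf_skb_store_bytes(skb, l3_off + IP4_OFFSET(daddr), &new_ip, sizeof(new_ip), 0);
}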

static int (*bpf_skb_change_head)(struct __sk_buff* skb, __u32 head_room,
                                  __u64 flags) = (void*)BPF_FUNC_skb_change_head;
static int (*bpf_skb_adjust_room)(struct __sk_buff* skb, __s32 len_diff, __u32 mode,
                                  __u64 flags) = (void*)BPF_FUNC_skb_adjust_room;

// Android only supports little endian architectures
#define htons(x) (__builtin_constant_p(x) ? ___constant_swab16(x) : __builtin_bswap16(x))
#define htonl(x) (__builtin_constant_p(x) ? ___constant_swab32(x) : __builtin_bswap32(x))
#define ntohs(x) htons(x)
#define ntohl(x) htonl(x)
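// e.g. (illustrative): because ETH_P_IP is a compile-time constant, a check like
//   if (skb->protocol == htons(ETH_P_IP)) { ... }
// folds to a constant comparison via ___constant_swab16() instead of a runtime bswap.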

static inline __always_inline __unused bool is_received_skb(struct __sk_buff* skb) {
    return skb->pkt_type == PACKET_HOST || skb->pkt_type == PACKET_BROADCAST ||
           skb->pkt_type == PACKET_MULTICAST;
}

// try to make the first 'len' header bytes readable/writable via direct packet access
// (note: AFAIK there is no way to ask for only direct packet read without also getting write)
static inline __always_inline void try_make_writable(struct __sk_buff* skb, unsigned len) {
    if (len > skb->len) len = skb->len;
    if (skb->data_end - skb->data < len) bpf_skb_pull_data(skb, len);
}
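
// Illustrative sketch (not part of the original header, function name is hypothetical):
// typical use of try_make_writable() before direct packet access. The explicit bounds
// check against data_end is still required to satisfy the verifier.
static inline __always_inline __unused bool example_eth_hdr_is_writable(struct __sk_buff* skb) {
    try_make_writable(skb, sizeof(struct ethhdr));
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    return data + sizeof(struct ethhdr) <= data_end;
}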

struct egress_bool { bool egress; };
#define INGRESS ((struct egress_bool){ .egress = false })
#define EGRESS ((struct egress_bool){ .egress = true })

struct stream_bool { bool down; };
#define UPSTREAM ((struct stream_bool){ .down = false })
#define DOWNSTREAM ((struct stream_bool){ .down = true })

struct rawip_bool { bool rawip; };
#define ETHER ((struct rawip_bool){ .rawip = false })
#define RAWIP ((struct rawip_bool){ .rawip = true })

struct updatetime_bool { bool updatetime; };
#define NO_UPDATETIME ((struct updatetime_bool){ .updatetime = false })
#define UPDATETIME ((struct updatetime_bool){ .updatetime = true })
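
// Illustrative note (not part of the original header): the one-field structs above act
// as "strong" boolean types, so call sites stay self-documenting and the compiler rejects
// accidentally swapped arguments. For example, a hypothetical helper
//   static inline bool should_count(struct egress_bool dir, struct rawip_bool kind) {
//       return dir.egress && kind.rawip;
//   }
// can be called as should_count(EGRESS, RAWIP) but not as should_count(RAWIP, EGRESS).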

// Return value for xt_bpf (netfilter match extension) programs
static const int XTBPF_NOMATCH = 0;
static const int XTBPF_MATCH = 1;

static const int BPF_DISALLOW = 0;
static const int BPF_ALLOW = 1;
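
// Illustrative note (not part of the original header): BPF_DISALLOW/BPF_ALLOW appear to be
// return values for verdict-style programs (e.g. cgroup skb/sock filters), where the kernel
// treats a return of 1 as "permit" and 0 as "deny".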