Add type safety for kernel version comparisons
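
Kernel versions were passed around as a plain 'unsigned', so any
integer could be used or compared where a kernel version was
expected. Wrap the value in a single-member struct instead: C does
not allow relational operators on structs, so every comparison is
forced through the new KVER_IS_AT_LEAST() macro, and passing a raw
integer no longer compiles. Illustrative before/after, as applied
to ingress_should_discard() in netd.c:

  // before: compiles for any 'unsigned', version-encoded or not
  if (kver < KVER(4, 19, 0)) return false;

  // after: only compiles if 'kver' is a struct kver_uint
  if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;

The commonly used cutoffs also gain named constants (KVER_4_14,
KVER_4_19, KVER_5_8, ...) in bpf_helpers.h, replacing the one-off
KVER_4_14 previously defined in bpf_net_helpers.h.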
Test: TreeHugger
Signed-off-by: Maciej Żenczykowski <maze@google.com>
Change-Id: If05c0c5383dceeb65964143f5574d2ee1a484907
diff --git a/bpf_progs/bpf_net_helpers.h b/bpf_progs/bpf_net_helpers.h
index 2383d95..1122f50 100644
--- a/bpf_progs/bpf_net_helpers.h
+++ b/bpf_progs/bpf_net_helpers.h
@@ -102,5 +102,3 @@
// constants for passing in to 'bool updatetime'
static const bool NO_UPDATETIME = false;
static const bool UPDATETIME = true;
-
-#define KVER_4_14 KVER(4, 14, 0)
diff --git a/bpf_progs/clatd.c b/bpf_progs/clatd.c
index 8f0ff84..ae63d2a 100644
--- a/bpf_progs/clatd.c
+++ b/bpf_progs/clatd.c
@@ -56,7 +56,7 @@
static inline __always_inline int nat64(struct __sk_buff* skb,
const bool is_ethernet,
- const unsigned kver) {
+ const struct kver_uint kver) {
// Require ethernet dst mac address to be our unicast address.
if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
@@ -115,7 +115,7 @@
if (proto == IPPROTO_FRAGMENT) {
// Fragment handling requires bpf_skb_adjust_room which is 4.14+
- if (kver < KVER_4_14) return TC_ACT_PIPE;
+ if (!KVER_IS_AT_LEAST(kver, 4, 14, 0)) return TC_ACT_PIPE;
// Must have (ethernet and) ipv6 header and ipv6 fragment extension header
if (data + l2_header_size + sizeof(*ip6) + sizeof(struct frag_hdr) > data_end)
@@ -233,7 +233,7 @@
//
// Note: we currently have no TreeHugger coverage for 4.9-T devices (there are no such
// Pixel or cuttlefish devices), so likely you won't notice for months if this breaks...
- if (kver >= KVER_4_14 && frag_off != htons(IP_DF)) {
+ if (KVER_IS_AT_LEAST(kver, 4, 14, 0) && frag_off != htons(IP_DF)) {
// If we're converting an IPv6 Fragment, we need to trim off 8 more bytes
// We're beyond recovery on error here... but hard to imagine how this could fail.
if (bpf_skb_adjust_room(skb, -(__s32)sizeof(struct frag_hdr), BPF_ADJ_ROOM_NET, /*flags*/0))
diff --git a/bpf_progs/netd.c b/bpf_progs/netd.c
index 7a48e8c..7800fbc 100644
--- a/bpf_progs/netd.c
+++ b/bpf_progs/netd.c
@@ -179,7 +179,7 @@
static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
const TypeOfKey* const key, \
const bool egress, \
- const unsigned kver) { \
+ const struct kver_uint kver) { \
StatsValue* value = bpf_##the_stats_map##_lookup_elem(key); \
if (!value) { \
StatsValue newValue = {}; \
@@ -219,7 +219,7 @@
const int L3_off,
void* const to,
const int len,
- const unsigned kver) {
+ const struct kver_uint kver) {
// 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
// ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
//
@@ -236,16 +236,16 @@
//
// For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
// since those extend the ethernet header from 14 to 18 bytes.
- return kver >= KVER(4, 19, 0)
+ return KVER_IS_AT_LEAST(kver, 4, 19, 0)
? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
: bpf_skb_load_bytes(skb, L3_off, to, len);
}
static __always_inline inline void do_packet_tracing(
const struct __sk_buff* const skb, const bool egress, const uint32_t uid,
- const uint32_t tag, const bool enable_tracing, const unsigned kver) {
+ const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
if (!enable_tracing) return;
- if (kver < KVER(5, 8, 0)) return;
+ if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;
uint32_t mapKey = 0;
bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
@@ -327,7 +327,7 @@
}
static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, bool egress,
- const unsigned kver) {
+ const struct kver_uint kver) {
uint32_t flag = 0;
if (skb->protocol == htons(ETH_P_IP)) {
uint8_t proto;
@@ -372,11 +372,11 @@
}
static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
- const unsigned kver) {
+ const struct kver_uint kver) {
// Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative() which
// provides relative to L3 header reads. Without that we could fetch the wrong bytes.
// Additionally earlier bpf verifiers are much harder to please.
- if (kver < KVER(4, 19, 0)) return false;
+ if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;
IngressDiscardKey k = {};
if (skb->protocol == htons(ETH_P_IP)) {
@@ -401,7 +401,7 @@
}
static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
- bool egress, const unsigned kver) {
+ bool egress, const struct kver_uint kver) {
if (is_system_uid(uid)) return PASS;
if (skip_owner_match(skb, egress, kver)) return PASS;
@@ -435,7 +435,7 @@
const struct __sk_buff* const skb,
const StatsKey* const key,
const bool egress,
- const unsigned kver) {
+ const struct kver_uint kver) {
if (selectedMap == SELECT_MAP_A) {
update_stats_map_A(skb, key, egress, kver);
} else {
@@ -445,7 +445,7 @@
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, bool egress,
const bool enable_tracing,
- const unsigned kver) {
+ const struct kver_uint kver) {
uint32_t sock_uid = bpf_get_socket_uid(skb);
uint64_t cookie = bpf_get_socket_cookie(skb);
UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
diff --git a/bpf_progs/offload.c b/bpf_progs/offload.c
index c752779..ae674ed 100644
--- a/bpf_progs/offload.c
+++ b/bpf_progs/offload.c
@@ -125,7 +125,7 @@
TETHERING_GID)
static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
- const bool downstream, const unsigned kver) {
+ const bool downstream, const struct kver_uint kver) {
// Must be meta-ethernet IPv6 frame
if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
@@ -358,7 +358,7 @@
const int l2_header_size, void* data, const void* data_end,
struct ethhdr* eth, struct iphdr* ip, const bool is_ethernet,
const bool downstream, const bool updatetime, const bool is_tcp,
- const unsigned kver) {
+ const struct kver_uint kver) {
struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);
@@ -548,7 +548,7 @@
}
static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
- const bool downstream, const bool updatetime, const unsigned kver) {
+ const bool downstream, const bool updatetime, const struct kver_uint kver) {
// Require ethernet dst mac address to be our unicast address.
if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
diff --git a/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h b/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h
index abeb4f6..baff09b 100644
--- a/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h
+++ b/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h
@@ -105,9 +105,19 @@
* implemented in the kernel sources.
*/
-#define KVER_NONE 0
-#define KVER(a, b, c) (((a) << 24) + ((b) << 16) + (c))
-#define KVER_INF 0xFFFFFFFFu
+struct kver_uint { unsigned int kver; };
+#define KVER_(v) ((struct kver_uint){ .kver = (v) })
+#define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
+#define KVER_NONE KVER_(0)
+#define KVER_4_14 KVER(4, 14, 0)
+#define KVER_4_19 KVER(4, 19, 0)
+#define KVER_5_4 KVER(5, 4, 0)
+#define KVER_5_8 KVER(5, 8, 0)
+#define KVER_5_9 KVER(5, 9, 0)
+#define KVER_5_15 KVER(5, 15, 0)
+#define KVER_INF KVER_(0xFFFFFFFFu)
+
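+// A struct cannot be compared with '<' or '>=' directly, so all kernel
+// version checks must go through this macro (or access .kver explicitly).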
+#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
/*
* BPFFS (ie. /sys/fs/bpf) labelling is as follows:
@@ -211,8 +221,8 @@
.mode = (md), \
.bpfloader_min_ver = (minloader), \
.bpfloader_max_ver = (maxloader), \
- .min_kver = (minkver), \
- .max_kver = (maxkver), \
+ .min_kver = (minkver).kver, \
+ .max_kver = (maxkver).kver, \
.selinux_context = (selinux), \
.pin_subdir = (pindir), \
.shared = (share).shared, \
@@ -232,7 +242,7 @@
selinux, pindir, share, min_loader, max_loader, \
ignore_eng, ignore_user, ignore_userdebug) \
DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md, \
- selinux, pindir, share, KVER(5, 8, 0), KVER_INF, \
+ selinux, pindir, share, KVER_5_8, KVER_INF, \
min_loader, max_loader, ignore_eng, ignore_user, \
ignore_userdebug); \
\
@@ -364,8 +374,8 @@
const struct bpf_prog_def SECTION("progs") the_prog##_def = { \
.uid = (prog_uid), \
.gid = (prog_gid), \
- .min_kver = (min_kv), \
- .max_kver = (max_kv), \
+ .min_kver = (min_kv).kver, \
+ .max_kver = (max_kv).kver, \
.optional = (opt).optional, \
.bpfloader_min_ver = (min_loader), \
.bpfloader_max_ver = (max_loader), \
@@ -423,8 +433,8 @@
// programs with no kernel version requirements
#define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
- DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, \
+ DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
- DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, \
+ DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
OPTIONAL)