Merge changes If33414ec,I8f211e47,If05c0c53 into main am: ae0971b4ae am: 15f2d41759

Original change: https://android-review.googlesource.com/c/platform/packages/modules/Connectivity/+/2776122

Change-Id: Ibdd2fef129d25865e97ba841ff2a06781bc906de
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
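
The change below replaces the plain unsigned kernel-version values with a
single-member struct (struct kver_uint), moves the named KVER_* constants
into bpf_helpers.h, and routes every version comparison through the new
KVER_IS_AT_LEAST() macro. The point of the wrapper appears to be type
safety: C has no ordered comparison on structs, so a comparison against a
raw integer, or one written in the wrong direction, no longer compiles.
A minimal standalone sketch of the idea, using the definitions this change
introduces in bpf_helpers.h (frag_supported is a hypothetical example
function, not part of the change):

    struct kver_uint { unsigned int kver; };
    #define KVER_(v) ((struct kver_uint){ .kver = (v) })
    #define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
    #define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)

    static int frag_supported(const struct kver_uint kver) {
        // return kver >= KVER(4, 14, 0);         // compile error: no >= on structs
        // return kver.kver >= KVER(4, 14, 0).kver;  // what the macro expands to
        return KVER_IS_AT_LEAST(kver, 4, 14, 0);  // the one sanctioned spelling
    }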
diff --git a/bpf_progs/block.c b/bpf_progs/block.c
index 68da0f7..0a2b0b8 100644
--- a/bpf_progs/block.c
+++ b/bpf_progs/block.c
@@ -63,12 +63,12 @@
                         BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
                         "", "netd_readonly/", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
 
-DEFINE_NETD_RO_BPF_PROG("bind4/block_port", bind4_block_port, KVER(4, 19, 0))
+DEFINE_NETD_RO_BPF_PROG("bind4/block_port", bind4_block_port, KVER_4_19)
 (struct bpf_sock_addr *ctx) {
     return block_port(ctx);
 }
 
-DEFINE_NETD_RO_BPF_PROG("bind6/block_port", bind6_block_port, KVER(4, 19, 0))
+DEFINE_NETD_RO_BPF_PROG("bind6/block_port", bind6_block_port, KVER_4_19)
 (struct bpf_sock_addr *ctx) {
     return block_port(ctx);
 }
diff --git a/bpf_progs/bpf_net_helpers.h b/bpf_progs/bpf_net_helpers.h
index 2383d95..1122f50 100644
--- a/bpf_progs/bpf_net_helpers.h
+++ b/bpf_progs/bpf_net_helpers.h
@@ -102,5 +102,3 @@
 // constants for passing in to 'bool updatetime'
 static const bool NO_UPDATETIME = false;
 static const bool UPDATETIME = true;
-
-#define KVER_4_14 KVER(4, 14, 0)
diff --git a/bpf_progs/clatd.c b/bpf_progs/clatd.c
index 8f0ff84..ae63d2a 100644
--- a/bpf_progs/clatd.c
+++ b/bpf_progs/clatd.c
@@ -56,7 +56,7 @@
 
 static inline __always_inline int nat64(struct __sk_buff* skb,
                                         const bool is_ethernet,
-                                        const unsigned kver) {
+                                        const struct kver_uint kver) {
     // Require ethernet dst mac address to be our unicast address.
     if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
 
@@ -115,7 +115,7 @@
 
     if (proto == IPPROTO_FRAGMENT) {
         // Fragment handling requires bpf_skb_adjust_room which is 4.14+
-        if (kver < KVER_4_14) return TC_ACT_PIPE;
+        if (!KVER_IS_AT_LEAST(kver, 4, 14, 0)) return TC_ACT_PIPE;
 
         // Must have (ethernet and) ipv6 header and ipv6 fragment extension header
         if (data + l2_header_size + sizeof(*ip6) + sizeof(struct frag_hdr) > data_end)
@@ -233,7 +233,7 @@
     //
     // Note: we currently have no TreeHugger coverage for 4.9-T devices (there are no such
     // Pixel or cuttlefish devices), so likely you won't notice for months if this breaks...
-    if (kver >= KVER_4_14 && frag_off != htons(IP_DF)) {
+    if (KVER_IS_AT_LEAST(kver, 4, 14, 0) && frag_off != htons(IP_DF)) {
         // If we're converting an IPv6 Fragment, we need to trim off 8 more bytes
         // We're beyond recovery on error here... but hard to imagine how this could fail.
         if (bpf_skb_adjust_room(skb, -(__s32)sizeof(struct frag_hdr), BPF_ADJ_ROOM_NET, /*flags*/0))
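
In clatd.c (and netd.c below) the kver argument is always a compile-time
constant: each DEFINE_*_KVER program variant passes its own minimum kernel
version, and since nat64() is __always_inline, KVER_IS_AT_LEAST() folds to a
constant and the unsupported branch is removed before the BPF verifier ever
sees it. A condensed sketch of the fragment-handling pattern above
(drop_frag_header is a hypothetical name; assumes the usual kernel and
bpf_net_helpers.h includes):

    static __always_inline int drop_frag_header(struct __sk_buff* skb,
                                                const struct kver_uint kver) {
        // bpf_skb_adjust_room() is 4.14+; this condition is a compile-time
        // constant, so the pre-4.14 build of the program contains no
        // reference to the helper at all.
        if (!KVER_IS_AT_LEAST(kver, 4, 14, 0)) return TC_ACT_PIPE;
        if (bpf_skb_adjust_room(skb, -(__s32)sizeof(struct frag_hdr),
                                BPF_ADJ_ROOM_NET, /*flags*/ 0))
            return TC_ACT_SHOT;  // beyond recovery on error: drop
        return TC_ACT_PIPE;
    }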
diff --git a/bpf_progs/dscpPolicy.c b/bpf_progs/dscpPolicy.c
index 88b50b5..e845a69 100644
--- a/bpf_progs/dscpPolicy.c
+++ b/bpf_progs/dscpPolicy.c
@@ -222,7 +222,7 @@
 }
 
 DEFINE_BPF_PROG_KVER("schedcls/set_dscp_ether", AID_ROOT, AID_SYSTEM, schedcls_set_dscp_ether,
-                     KVER(5, 15, 0))
+                     KVER_5_15)
 (struct __sk_buff* skb) {
     if (skb->pkt_type != PACKET_HOST) return TC_ACT_PIPE;
 
diff --git a/bpf_progs/netd.c b/bpf_progs/netd.c
index 7a48e8c..fc29c59 100644
--- a/bpf_progs/netd.c
+++ b/bpf_progs/netd.c
@@ -179,7 +179,7 @@
     static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                               const TypeOfKey* const key,        \
                                                               const bool egress,                 \
-                                                              const unsigned kver) {             \
+                                                              const struct kver_uint kver) {     \
         StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                              \
         if (!value) {                                                                            \
             StatsValue newValue = {};                                                            \
@@ -219,7 +219,7 @@
                                                          const int L3_off,
                                                          void* const to,
                                                          const int len,
-                                                         const unsigned kver) {
+                                                         const struct kver_uint kver) {
     // 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
     // ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
     //
@@ -236,16 +236,16 @@
     //
     // For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
     // since those extend the ethernet header from 14 to 18 bytes.
-    return kver >= KVER(4, 19, 0)
+    return KVER_IS_AT_LEAST(kver, 4, 19, 0)
         ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
         : bpf_skb_load_bytes(skb, L3_off, to, len);
 }
 
 static __always_inline inline void do_packet_tracing(
         const struct __sk_buff* const skb, const bool egress, const uint32_t uid,
-        const uint32_t tag, const bool enable_tracing, const unsigned kver) {
+        const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
     if (!enable_tracing) return;
-    if (kver < KVER(5, 8, 0)) return;
+    if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;
 
     uint32_t mapKey = 0;
     bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
@@ -327,7 +327,7 @@
 }
 
 static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, bool egress,
-                                                    const unsigned kver) {
+                                                    const struct kver_uint kver) {
     uint32_t flag = 0;
     if (skb->protocol == htons(ETH_P_IP)) {
         uint8_t proto;
@@ -372,11 +372,11 @@
 }
 
 static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
-                                                          const unsigned kver) {
+                                                          const struct kver_uint kver) {
     // Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative() which
     // provides relative to L3 header reads.  Without that we could fetch the wrong bytes.
     // Additionally earlier bpf verifiers are much harder to please.
-    if (kver < KVER(4, 19, 0)) return false;
+    if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;
 
     IngressDiscardKey k = {};
     if (skb->protocol == htons(ETH_P_IP)) {
@@ -401,7 +401,7 @@
 }
 
 static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
-                                                  bool egress, const unsigned kver) {
+                                                  bool egress, const struct kver_uint kver) {
     if (is_system_uid(uid)) return PASS;
 
     if (skip_owner_match(skb, egress, kver)) return PASS;
@@ -435,7 +435,7 @@
                                                             const struct __sk_buff* const skb,
                                                             const StatsKey* const key,
                                                             const bool egress,
-                                                            const unsigned kver) {
+                                                            const struct kver_uint kver) {
     if (selectedMap == SELECT_MAP_A) {
         update_stats_map_A(skb, key, egress, kver);
     } else {
@@ -445,7 +445,7 @@
 
 static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, bool egress,
                                                       const bool enable_tracing,
-                                                      const unsigned kver) {
+                                                      const struct kver_uint kver) {
     uint32_t sock_uid = bpf_get_socket_uid(skb);
     uint64_t cookie = bpf_get_socket_cookie(skb);
     UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
@@ -505,64 +505,64 @@
 
 // This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
 DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace_user", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_ingress_trace_user, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_ingress_trace_user, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                     "fs_bpf_netd_readonly", "",
                     IGNORE_ON_ENG, LOAD_ON_USER, IGNORE_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
 }
 
 // This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
 DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_ingress_trace, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_ingress_trace, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                     "fs_bpf_netd_readonly", "",
                     LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
 }
 
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_ingress_4_19, KVER(4, 19, 0), KVER_INF)
+                                bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER(4, 19, 0));
+    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19);
 }
 
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER(4, 19, 0))
+                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER_4_19)
 (struct __sk_buff* skb) {
     return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE);
 }
 
 // This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
 DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace_user", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_egress_trace_user, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_egress_trace_user, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                     "fs_bpf_netd_readonly", "",
                     LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
 }
 
 // This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
 DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_egress_trace, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_egress_trace, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                     "fs_bpf_netd_readonly", "",
                     LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
 }
 
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_egress_4_19, KVER(4, 19, 0), KVER_INF)
+                                bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER(4, 19, 0));
+    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19);
 }
 
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_egress_4_14, KVER_NONE, KVER(4, 19, 0))
+                                bpf_cgroup_egress_4_14, KVER_NONE, KVER_4_19)
 (struct __sk_buff* skb) {
     return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE);
 }
@@ -637,9 +637,7 @@
     return BPF_NOMATCH;
 }
 
-DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
-                          KVER(4, 14, 0))
-(struct bpf_sock* sk) {
+static __always_inline inline uint8_t get_app_permissions() {
     uint64_t gid_uid = bpf_get_current_uid_gid();
     /*
      * A given app is guaranteed to have the same app ID in all the profiles in
@@ -649,13 +647,15 @@
      */
     uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
     uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
-    if (!permissions) {
-        // UID not in map. Default to just INTERNET permission.
-        return 1;
-    }
+    // if UID not in map, then default to just INTERNET permission.
+    return permissions ? *permissions : BPF_PERMISSION_INTERNET;
+}
 
+DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
+                          KVER_4_14)
+(struct bpf_sock* sk) {
     // A return value of 1 means allow, everything else means deny.
-    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
+    return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? 1 : 0;
 }
 
 LICENSE("Apache 2.0");
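
The inet_socket_create rewrite above factors the permission lookup into
get_app_permissions() without changing behavior: the old path returned 1
(allow) when the UID was missing from the map, and defaulting to
BPF_PERMISSION_INTERNET is equivalent provided that constant is the value 1,
as the old fallback implies. The final bit test also matches the old
equality check, since a single-bit mask is either fully present or fully
absent. A small host-side sketch of the check (allow_socket is hypothetical;
the value of BPF_PERMISSION_INTERNET is an assumption inferred from the old
"return 1" fallback):

    #include <stdint.h>
    #define BPF_PERMISSION_INTERNET 1u  // assumed: bit 0, matching old fallback

    static int allow_socket(const uint8_t* permissions) {
        // UID not in the map defaults to INTERNET-only, like the old code.
        uint8_t perms = permissions ? *permissions : BPF_PERMISSION_INTERNET;
        // cgroup/sock programs must return exactly 1 to allow.
        return (perms & BPF_PERMISSION_INTERNET) ? 1 : 0;
    }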
diff --git a/bpf_progs/offload.c b/bpf_progs/offload.c
index c752779..0aab777 100644
--- a/bpf_progs/offload.c
+++ b/bpf_progs/offload.c
@@ -125,7 +125,7 @@
                    TETHERING_GID)
 
 static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
-        const bool downstream, const unsigned kver) {
+        const bool downstream, const struct kver_uint kver) {
     // Must be meta-ethernet IPv6 frame
     if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
 
@@ -324,26 +324,26 @@
 //
 // Hence, these mandatory (must load successfully) implementations for 4.14+ kernels:
 DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$4_14", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_downstream6_rawip_4_14, KVER(4, 14, 0))
+                     sched_cls_tether_downstream6_rawip_4_14, KVER_4_14)
 (struct __sk_buff* skb) {
-    return do_forward6(skb, RAWIP, DOWNSTREAM, KVER(4, 14, 0));
+    return do_forward6(skb, RAWIP, DOWNSTREAM, KVER_4_14);
 }
 
 DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$4_14", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_upstream6_rawip_4_14, KVER(4, 14, 0))
+                     sched_cls_tether_upstream6_rawip_4_14, KVER_4_14)
 (struct __sk_buff* skb) {
-    return do_forward6(skb, RAWIP, UPSTREAM, KVER(4, 14, 0));
+    return do_forward6(skb, RAWIP, UPSTREAM, KVER_4_14);
 }
 
 // and define no-op stubs for pre-4.14 kernels.
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
@@ -358,7 +358,7 @@
         const int l2_header_size, void* data, const void* data_end,
         struct ethhdr* eth, struct iphdr* ip, const bool is_ethernet,
         const bool downstream, const bool updatetime, const bool is_tcp,
-        const unsigned kver) {
+        const struct kver_uint kver) {
     struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
     struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);
 
@@ -548,7 +548,7 @@
 }
 
 static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
-        const bool downstream, const bool updatetime, const unsigned kver) {
+        const bool downstream, const bool updatetime, const struct kver_uint kver) {
     // Require ethernet dst mac address to be our unicast address.
     if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
 
@@ -646,27 +646,27 @@
 // Full featured (required) implementations for 5.8+ kernels (these are S+ by definition)
 
 DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_downstream4_rawip_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_downstream4_rawip_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_5_8);
 }
 
 DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_upstream4_rawip_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_upstream4_rawip_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_5_8);
 }
 
 DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_downstream4_ether_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_downstream4_ether_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_5_8);
 }
 
 DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_upstream4_ether_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_upstream4_ether_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_5_8);
 }
 
 // Full featured (optional) implementations for 4.14-S, 4.19-S & 5.4-S kernels
@@ -675,33 +675,33 @@
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_downstream4_rawip_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_4_14);
 }
 
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_upstream4_rawip_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_4_14);
 }
 
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_downstream4_ether_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_4_14);
 }
 
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_upstream4_ether_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_4_14);
 }
 
 // Partial (TCP-only: will not update 'last_used' field) implementations for 4.14+ kernels.
@@ -719,15 +719,15 @@
 // RAWIP: Required for 5.4-R kernels -- which always support bpf_skb_change_head().
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$5_4", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_rawip_5_4, KVER(5, 4, 0), KVER(5, 8, 0))
+                           sched_cls_tether_downstream4_rawip_5_4, KVER_5_4, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER(5, 4, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_5_4);
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$5_4", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_rawip_5_4, KVER(5, 4, 0), KVER(5, 8, 0))
+                           sched_cls_tether_upstream4_rawip_5_4, KVER_5_4, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER(5, 4, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_5_4);
 }
 
 // RAWIP: Optional for 4.14/4.19 (R) kernels -- which support bpf_skb_change_head().
@@ -736,31 +736,31 @@
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$4_14",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_downstream4_rawip_4_14,
-                                    KVER(4, 14, 0), KVER(5, 4, 0))
+                                    KVER_4_14, KVER_5_4)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$4_14",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_upstream4_rawip_4_14,
-                                    KVER(4, 14, 0), KVER(5, 4, 0))
+                                    KVER_4_14, KVER_5_4)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 
 // ETHER: Required for 4.14-Q/R, 4.19-Q/R & 5.4-R kernels.
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$4_14", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+                           sched_cls_tether_downstream4_ether_4_14, KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+                           sched_cls_tether_upstream4_ether_4_14, KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 
 // Placeholder (no-op) implementations for older Q kernels
@@ -768,13 +768,13 @@
 // RAWIP: 4.9-P/Q, 4.14-P/Q & 4.19-Q kernels -- without bpf_skb_change_head() for tc programs
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 4, 0))
+                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER_5_4)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 4, 0))
+                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER_5_4)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
@@ -782,13 +782,13 @@
 // ETHER: 4.9-P/Q kernel
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
 
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
@@ -840,7 +840,7 @@
 }
 
 #define DEFINE_XDP_PROG(str, func) \
-    DEFINE_BPF_PROG_KVER(str, TETHERING_UID, TETHERING_GID, func, KVER(5, 9, 0))(struct xdp_md *ctx)
+    DEFINE_BPF_PROG_KVER(str, TETHERING_UID, TETHERING_GID, func, KVER_5_9)(struct xdp_md *ctx)
 
 DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                  xdp_tether_downstream_ether) {
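
offload.c defines several variants of each tether program keyed by
kernel-version range, written so that exactly one variant matches any given
kernel. Adjacent pairings like KVER_NONE..KVER_4_19 next to
KVER_4_19..KVER_INF only partition cleanly if min_kver is inclusive and
max_kver exclusive, which suggests the selection rule sketched here
(prog_matches_kernel is hypothetical; the inclusive/exclusive convention is
inferred from the adjacent ranges, not stated in this diff):

    // Assumed loader-side selection rule implied by the adjacent ranges:
    static int prog_matches_kernel(unsigned int run_kver,
                                   unsigned int min_kver, unsigned int max_kver) {
        return run_kver >= min_kver && run_kver < max_kver;  // [min, max)
    }

Under this rule a 5.4 kernel loads tether_downstream4_rawip$5_4 from
[5.4, 5.8) and skips both the $4_14 variant ([4.14, 5.4)) and the $5_8
variant ([5.8, inf)).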
diff --git a/bpf_progs/test.c b/bpf_progs/test.c
index 68469c8..70b08b7 100644
--- a/bpf_progs/test.c
+++ b/bpf_progs/test.c
@@ -49,7 +49,7 @@
 DEFINE_BPF_MAP_GRW(bitmap, ARRAY, int, uint64_t, 2, TETHERING_GID)
 
 DEFINE_BPF_PROG_KVER("xdp/drop_ipv4_udp_ether", TETHERING_UID, TETHERING_GID,
-                      xdp_test, KVER(5, 9, 0))
+                      xdp_test, KVER_5_9)
 (struct xdp_md *ctx) {
     void *data = (void *)(long)ctx->data;
     void *data_end = (void *)(long)ctx->data_end;
diff --git a/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h b/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h
index abeb4f6..baff09b 100644
--- a/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h
+++ b/staticlibs/native/bpf_headers/include/bpf/bpf_helpers.h
@@ -105,9 +105,19 @@
  * implemented in the kernel sources.
  */
 
-#define KVER_NONE 0
-#define KVER(a, b, c) (((a) << 24) + ((b) << 16) + (c))
-#define KVER_INF 0xFFFFFFFFu
+struct kver_uint { unsigned int kver; };
+#define KVER_(v) ((struct kver_uint){ .kver = (v) })
+#define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
+#define KVER_NONE KVER_(0)
+#define KVER_4_14 KVER(4, 14, 0)
+#define KVER_4_19 KVER(4, 19, 0)
+#define KVER_5_4 KVER(5, 4, 0)
+#define KVER_5_8 KVER(5, 8, 0)
+#define KVER_5_9 KVER(5, 9, 0)
+#define KVER_5_15 KVER(5, 15, 0)
+#define KVER_INF KVER_(0xFFFFFFFFu)
+
+#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
 
 /*
  * BPFFS (ie. /sys/fs/bpf) labelling is as follows:
@@ -211,8 +221,8 @@
         .mode = (md),                                                       \
         .bpfloader_min_ver = (minloader),                                   \
         .bpfloader_max_ver = (maxloader),                                   \
-        .min_kver = (minkver),                                              \
-        .max_kver = (maxkver),                                              \
+        .min_kver = (minkver).kver,                                         \
+        .max_kver = (maxkver).kver,                                         \
         .selinux_context = (selinux),                                       \
         .pin_subdir = (pindir),                                             \
         .shared = (share).shared,                                           \
@@ -232,7 +242,7 @@
                                selinux, pindir, share, min_loader, max_loader, \
                                ignore_eng, ignore_user, ignore_userdebug)      \
     DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md,      \
-                        selinux, pindir, share, KVER(5, 8, 0), KVER_INF,       \
+                        selinux, pindir, share, KVER_5_8, KVER_INF,            \
                         min_loader, max_loader, ignore_eng, ignore_user,       \
                         ignore_userdebug);                                     \
                                                                                \
@@ -364,8 +374,8 @@
     const struct bpf_prog_def SECTION("progs") the_prog##_def = {                        \
         .uid = (prog_uid),                                                               \
         .gid = (prog_gid),                                                               \
-        .min_kver = (min_kv),                                                            \
-        .max_kver = (max_kv),                                                            \
+        .min_kver = (min_kv).kver,                                                       \
+        .max_kver = (max_kv).kver,                                                       \
         .optional = (opt).optional,                                                      \
         .bpfloader_min_ver = (min_loader),                                               \
         .bpfloader_max_ver = (max_loader),                                               \
@@ -423,8 +433,8 @@
 
 // programs with no kernel version requirements
 #define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
-    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                    MANDATORY)
 #define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
-    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                    OPTIONAL)
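
The version encoding itself is unchanged: KVER(a, b, c) still packs
major/minor/patch as (a << 24) + (b << 16) + c, so ordered comparison of the
.kver fields keeps working across the wrapper. A standalone host-side check
of the new macro (illustrative sketch; uses only the definitions added in
this header):

    #include <stdio.h>

    struct kver_uint { unsigned int kver; };
    #define KVER_(v) ((struct kver_uint){ .kver = (v) })
    #define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
    #define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)

    int main(void) {
        const struct kver_uint k = KVER(5, 4, 0);
        printf("%d\n", KVER_IS_AT_LEAST(k, 4, 19, 0));  // 1: 5.4 >= 4.19
        printf("%d\n", KVER_IS_AT_LEAST(k, 5, 8, 0));   // 0: 5.4 <  5.8
        return 0;
    }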