Merge "Add methods to distinguish TetheringRequest types" into main
diff --git a/bpf/headers/include/bpf/BpfUtils.h b/bpf/headers/include/bpf/BpfUtils.h
index 9dd5822..9e8b2c7 100644
--- a/bpf/headers/include/bpf/BpfUtils.h
+++ b/bpf/headers/include/bpf/BpfUtils.h
@@ -63,9 +63,9 @@
// 4.9 kernels. The kernel code of socket release on pf_key socket will
// explicitly call synchronize_rcu() which is exactly what we need.
//
- // Linux 4.14/4.19/5.4/5.10/5.15/6.1 (and 6.3-rc5) still have this same behaviour.
+ // Linux 4.14/4.19/5.4/5.10/5.15/6.1/6.6/6.12 (& 6.13) have this behaviour.
// see net/key/af_key.c: pfkey_release() -> synchronize_rcu()
- // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/key/af_key.c?h=v6.3-rc5#n185
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/key/af_key.c?h=v6.13#n185
const int pfSocket = socket(AF_KEY, SOCK_RAW | SOCK_CLOEXEC, PF_KEY_V2);
if (pfSocket < 0) {
diff --git a/bpf/headers/include/bpf_helpers.h b/bpf/headers/include/bpf_helpers.h
index 0bd3421..425c429 100644
--- a/bpf/headers/include/bpf_helpers.h
+++ b/bpf/headers/include/bpf_helpers.h
@@ -122,10 +122,7 @@
*/
#define CRITICAL(REASON) char _critical[] SECTION("critical") = (REASON)
-/*
- * Helper functions called from eBPF programs written in C. These are
- * implemented in the kernel sources.
- */
+// Helpers for writing kernel-version-specific bpf programs
struct kver_uint { unsigned int kver; };
#define KVER_(v) ((struct kver_uint){ .kver = (v) })
@@ -135,7 +132,6 @@
#define KVER_4_19 KVER(4, 19, 0)
#define KVER_5_4 KVER(5, 4, 0)
#define KVER_5_8 KVER(5, 8, 0)
-#define KVER_5_9 KVER(5, 9, 0)
#define KVER_5_10 KVER(5, 10, 0)
#define KVER_5_15 KVER(5, 15, 0)
#define KVER_6_1 KVER(6, 1, 0)
@@ -144,6 +140,27 @@
#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
+// Helpers for writing sdk-level-specific bpf programs
+//
+// Note: we choose to follow sdk api level values, but there is no real need for this:
+// the values merely need to be monotonically increasing. We could also use values ten
+// or even a hundred times larger to leave room for quarters or months, use dates
+// (2502 or 202506 for 25Q2), or even the mainline bpfloader version... For now this
+// easily suffices for our use case.
+
+struct sdk_level_uint { unsigned int sdk_level; };
+#define SDK_LEVEL_(v) ((struct sdk_level_uint){ .sdk_level = (v) })
+#define SDK_LEVEL_NONE SDK_LEVEL_(0)
+#define SDK_LEVEL_S SDK_LEVEL_(31) // Android 12
+#define SDK_LEVEL_Sv2 SDK_LEVEL_(32) // Android 12L
+#define SDK_LEVEL_T SDK_LEVEL_(33) // Android 13
+#define SDK_LEVEL_U SDK_LEVEL_(34) // Android 14
+#define SDK_LEVEL_V SDK_LEVEL_(35) // Android 15
+#define SDK_LEVEL_24Q3 SDK_LEVEL_V
+#define SDK_LEVEL_25Q2 SDK_LEVEL_(36) // Android 16
+
+#define SDK_LEVEL_IS_AT_LEAST(lvl, v) ((lvl).sdk_level >= (SDK_LEVEL_##v).sdk_level)
+
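
For illustration, a sketch of how these macros are meant to compose (the helper and its return values are hypothetical, not part of this change; callers pass the SDK level their program variant targets, exactly as bpf_traffic_account() does with its new 'lvl' parameter further down in this diff):

    // Hypothetical: since 'lvl' is a compile-time constant per program variant,
    // the verifier can dead-code-eliminate the branches not taken.
    static inline int example_behaviour(const struct sdk_level_uint lvl) {
        if (SDK_LEVEL_IS_AT_LEAST(lvl, 25Q2)) return 2;  // Android 16 / 25Q2+ path
        if (SDK_LEVEL_IS_AT_LEAST(lvl, V)) return 1;     // Android 15 / 24Q3+ path
        return 0;                                        // pre-V fallback
    }
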
/*
* BPFFS (ie. /sys/fs/bpf) labelling is as follows:
* subdirectory selinux context mainline usecase / usable by
@@ -168,6 +185,11 @@
* See cs/p:aosp-master%20-file:prebuilts/%20file:genfs_contexts%20"genfscon%20bpf"
*/
+/*
+ * Helper functions called from eBPF programs written in C. These are
+ * implemented in the kernel sources.
+ */
+
/* generic functions */
/*
@@ -231,16 +253,24 @@
(ignore_userdebug).ignore_on_userdebug), \
"bpfloader min version must be >= 0.33 in order to use ignored_on");
+#define ABSOLUTE(x) ((x) < 0 ? -(x) : (x))
+
+#define DEFAULT_BPF_MAP_FLAGS(type, num_entries, mapflags) \
+ ( (mapflags) | \
+ ((num_entries) < 0 ? BPF_F_NO_PREALLOC : 0) | \
+ (type == BPF_MAP_TYPE_LPM_TRIE ? BPF_F_NO_PREALLOC : 0) \
+ )
+
#define DEFINE_BPF_MAP_BASE(the_map, TYPE, keysize, valuesize, num_entries, \
usr, grp, md, selinux, pindir, share, minkver, \
maxkver, minloader, maxloader, ignore_eng, \
- ignore_user, ignore_userdebug) \
+ ignore_user, ignore_userdebug, mapflags) \
const struct bpf_map_def SECTION("maps") the_map = { \
.type = BPF_MAP_TYPE_##TYPE, \
.key_size = (keysize), \
.value_size = (valuesize), \
- .max_entries = (num_entries), \
- .map_flags = 0, \
+ .max_entries = ABSOLUTE(num_entries), \
+ .map_flags = DEFAULT_BPF_MAP_FLAGS(BPF_MAP_TYPE_##TYPE, num_entries, mapflags), \
.uid = (usr), \
.gid = (grp), \
.mode = (md), \
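
Assuming bpf_helpers.h (with the macros above) and the uapi <linux/bpf.h> constants are in scope, the new semantics can be sanity-checked at compile time; these asserts are illustrative, not part of the patch:

    // A negative num_entries requests BPF_F_NO_PREALLOC; ABSOLUTE() strips the sign:
    _Static_assert(ABSOLUTE(-64) == 64, "sign stripped for max_entries");
    _Static_assert(DEFAULT_BPF_MAP_FLAGS(BPF_MAP_TYPE_HASH, -64, 0) == BPF_F_NO_PREALLOC,
                   "negative entry count implies no preallocation");
    // LPM_TRIE maps get BPF_F_NO_PREALLOC regardless of sign (the kernel requires it):
    _Static_assert(DEFAULT_BPF_MAP_FLAGS(BPF_MAP_TYPE_LPM_TRIE, 16, 0) == BPF_F_NO_PREALLOC,
                   "lpm trie is never preallocated");
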
@@ -260,16 +290,17 @@
// Type safe macro to declare a ring buffer and related output functions.
// Compatibility:
// * BPF ring buffers are only available on kernels 5.8 and above. Any program
-// accessing the ring buffer should set a program level min_kver >= 5.8.
-// * The definition below sets a map min_kver of 5.8 which requires targeting
+// accessing the ring buffer should set a program-level min_kver >= 5.10,
+// since 5.10 is the next LTS release after 5.8.
+// * The definition below sets a map min_kver of 5.10 which requires targeting
// a BPFLOADER_MIN_VER >= BPFLOADER_S_VERSION.
#define DEFINE_BPF_RINGBUF_EXT(the_map, ValueType, size_bytes, usr, grp, md, \
selinux, pindir, share, min_loader, max_loader, \
ignore_eng, ignore_user, ignore_userdebug) \
DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md, \
- selinux, pindir, share, KVER_5_8, KVER_INF, \
+ selinux, pindir, share, KVER_5_10, KVER_INF, \
min_loader, max_loader, ignore_eng, ignore_user, \
- ignore_userdebug); \
+ ignore_userdebug, 0); \
\
_Static_assert((size_bytes) >= 4096, "min 4 kiB ringbuffer size"); \
_Static_assert((size_bytes) <= 0x10000000, "max 256 MiB ringbuffer size"); \
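
A hypothetical declaration against the updated macro, with every argument value invented for illustration (the map name, event struct, and chosen selinux context/owners are not from this change):

    struct ExampleEvent { unsigned int ifindex; unsigned long long timestampNs; };

    // 4 kiB ring buffer, readable by system; the map min_kver of 5.10 is
    // applied internally by the macro via DEFINE_BPF_MAP_BASE.
    DEFINE_BPF_RINGBUF_EXT(example_events_ringbuf, struct ExampleEvent, 4096,
                           AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "",
                           PRIVATE, BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER,
                           LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
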
@@ -317,11 +348,11 @@
/* type safe macro to declare a map and related accessor functions */
#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, \
selinux, pindir, share, min_loader, max_loader, ignore_eng, \
- ignore_user, ignore_userdebug) \
+ ignore_user, ignore_userdebug, mapFlags) \
DEFINE_BPF_MAP_BASE(the_map, TYPE, sizeof(KeyType), sizeof(ValueType), \
num_entries, usr, grp, md, selinux, pindir, share, \
KVER_NONE, KVER_INF, min_loader, max_loader, \
- ignore_eng, ignore_user, ignore_userdebug); \
+ ignore_eng, ignore_user, ignore_userdebug, mapFlags); \
BPF_MAP_ASSERT_OK(BPF_MAP_TYPE_##TYPE, (num_entries), (md)); \
_Static_assert(sizeof(KeyType) < 1024, "aosp/2370288 requires < 1024 byte keys"); \
_Static_assert(sizeof(ValueType) < 65536, "aosp/2370288 requires < 65536 byte values"); \
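
With the new trailing mapFlags parameter, call sites can request flags directly instead of relying on loader-side special cases; a hypothetical use (identifiers invented):

    // Explicitly non-preallocated hash map via the new mapFlags argument:
    DEFINE_BPF_MAP_EXT(example_flagged_map, HASH, uint32_t, uint32_t, 128,
                       AID_ROOT, AID_ROOT, 0600, "fs_bpf_loader", "", PRIVATE,
                       BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, BPF_F_NO_PREALLOC)
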
@@ -359,13 +390,13 @@
#define DEFINE_BPF_MAP_KERNEL_INTERNAL(the_map, TYPE, KeyType, ValueType, num_entries) \
DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, AID_ROOT, \
0000, "fs_bpf_loader", "", PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \
- LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+ LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)
#define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md) \
DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, \
DEFAULT_BPF_MAP_SELINUX_CONTEXT, DEFAULT_BPF_MAP_PIN_SUBDIR, \
PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \
- LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+ LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)
#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \
DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
diff --git a/bpf/headers/include/bpf_map_def.h b/bpf/headers/include/bpf_map_def.h
index 2d6736c..d67da48 100644
--- a/bpf/headers/include/bpf_map_def.h
+++ b/bpf/headers/include/bpf_map_def.h
@@ -94,6 +94,10 @@
_Static_assert(_Alignof(enum bpf_map_type) == 4, "_Alignof enum bpf_map_type != 4");
// Linux kernel requires sizeof(int) == 4, sizeof(void*) == sizeof(long), sizeof(long long) == 8
+_Static_assert(sizeof(int) == 4, "sizeof int != 4");
+_Static_assert(__alignof__(int) == 4, "__alignof__ int != 4");
+_Static_assert(_Alignof(int) == 4, "_Alignof int != 4");
+
_Static_assert(sizeof(unsigned int) == 4, "sizeof unsigned int != 4");
_Static_assert(__alignof__(unsigned int) == 4, "__alignof__ unsigned int != 4");
_Static_assert(_Alignof(unsigned int) == 4, "_Alignof unsigned int != 4");
@@ -155,7 +159,7 @@
enum bpf_map_type type;
unsigned int key_size;
unsigned int value_size;
- unsigned int max_entries;
+ int max_entries; // negative means BPF_F_NO_PREALLOC, but *might* not work on Android S
unsigned int map_flags;
// The following are not supported by the Android bpfloader:
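
Under this convention a definition requests lazy allocation just by negating the entry count, e.g. (a hypothetical map; the name, types, and ownership are invented):

    // 1024-entry hash map created with BPF_F_NO_PREALLOC (note the minus sign);
    // per the comment above, this *might* not work on Android S's bpfloader.
    DEFINE_BPF_MAP_UGM(example_cookie_uid_map, HASH, uint64_t, uint32_t, -1024,
                       AID_ROOT, AID_NET_BW_ACCT, 0060)
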
diff --git a/bpf/loader/NetBpfLoad.cpp b/bpf/loader/NetBpfLoad.cpp
index 038786c..53c7d49 100644
--- a/bpf/loader/NetBpfLoad.cpp
+++ b/bpf/loader/NetBpfLoad.cpp
@@ -616,9 +616,6 @@
if (type == BPF_MAP_TYPE_DEVMAP || type == BPF_MAP_TYPE_DEVMAP_HASH)
desired_map_flags |= BPF_F_RDONLY_PROG;
- if (type == BPF_MAP_TYPE_LPM_TRIE)
- desired_map_flags |= BPF_F_NO_PREALLOC;
-
// The .h file enforces that this is a power of two, and page size will
// also always be a power of two, so this logic is actually enough to
// force it to be a multiple of the page size, as required by the kernel.
@@ -794,7 +791,7 @@
.key_size = md[i].key_size,
.value_size = md[i].value_size,
.max_entries = max_entries,
- .map_flags = md[i].map_flags | (type == BPF_MAP_TYPE_LPM_TRIE ? BPF_F_NO_PREALLOC : 0),
+ .map_flags = md[i].map_flags,
};
if (isAtLeastKernelVersion(4, 15, 0))
strlcpy(req.map_name, mapNames[i].c_str(), sizeof(req.map_name));
diff --git a/bpf/progs/netd.c b/bpf/progs/netd.c
index ed0eed5..5510a20 100644
--- a/bpf/progs/netd.c
+++ b/bpf/progs/netd.c
@@ -46,14 +46,14 @@
DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "", \
PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \
- LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+ LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)
// For maps netd only needs read only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "", \
PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, \
- LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+ LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)
// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
@@ -92,7 +92,7 @@
DEFINE_BPF_MAP_EXT(packet_trace_enabled_map, ARRAY, uint32_t, bool, 1,
AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
- LOAD_ON_USER, LOAD_ON_USERDEBUG)
+ LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)
// A ring buffer on which packet information is pushed.
DEFINE_BPF_RINGBUF_EXT(packet_trace_ringbuf, PacketTrace, PACKET_TRACE_BUF_SIZE,
@@ -110,31 +110,34 @@
// program (see XT_BPF_MODE_PATH_PINNED) and then the iptables binary (or rather
// the kernel acting on behalf of it) must be able to retrieve the pinned program
// for the reload to succeed
-#define DEFINE_XTBPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
- DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)
+#define DEFINE_XTBPF_PROG(SECTION_NAME, the_prog) \
+ DEFINE_BPF_PROG(SECTION_NAME, AID_ROOT, AID_NET_ADMIN, the_prog)
// programs that need to be usable by netd, but not by netutils_wrappers
// (this is because these are currently attached by the mainline provided libnetd_updatable .so
// which is loaded into netd and thus runs as netd uid/gid/selinux context)
-#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV, maxKV) \
- DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, \
- minKV, maxKV, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
+#define DEFINE_NETD_BPF_PROG_RANGES(SECTION_NAME, the_prog, minKV, maxKV, min_loader, max_loader) \
+ DEFINE_BPF_PROG_EXT(SECTION_NAME, AID_ROOT, AID_ROOT, the_prog, \
+ minKV, maxKV, min_loader, max_loader, MANDATORY, \
"fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
-#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
- DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF)
+#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, the_prog, minKV, maxKV) \
+ DEFINE_NETD_BPF_PROG_RANGES(SECTION_NAME, the_prog, minKV, maxKV, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER)
-#define DEFINE_NETD_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
- DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE)
+#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, the_prog, min_kv) \
+ DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, the_prog, min_kv, KVER_INF)
-#define DEFINE_NETD_V_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV) \
- DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV, \
+#define DEFINE_NETD_BPF_PROG(SECTION_NAME, the_prog) \
+ DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, the_prog, KVER_NONE)
+
+#define DEFINE_NETD_V_BPF_PROG_KVER(SECTION_NAME, the_prog, minKV) \
+ DEFINE_BPF_PROG_EXT(SECTION_NAME, AID_ROOT, AID_ROOT, the_prog, minKV, \
KVER_INF, BPFLOADER_MAINLINE_V_VERSION, BPFLOADER_MAX_VER, MANDATORY, \
"fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
// programs that only need to be usable by the system server
-#define DEFINE_SYS_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
- DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
+#define DEFINE_SYS_BPF_PROG(SECTION_NAME, the_prog) \
+ DEFINE_BPF_PROG_EXT(SECTION_NAME, AID_ROOT, AID_NET_ADMIN, the_prog, KVER_NONE, KVER_INF, \
BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
"fs_bpf_net_shared", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
@@ -441,7 +444,8 @@
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb,
const struct egress_bool egress,
const bool enable_tracing,
- const struct kver_uint kver) {
+ const struct kver_uint kver,
+ const struct sdk_level_uint lvl) {
// sock_uid will be 'overflowuid' if !sk_fullsock(sk_to_full_sk(skb->sk))
uint32_t sock_uid = bpf_get_socket_uid(skb);
@@ -483,6 +487,10 @@
if (match == DROP_UNLESS_DNS) match = DROP;
}
+ if (SDK_LEVEL_IS_AT_LEAST(lvl, 25Q2) && (match != DROP)) {
+ // TODO: implement local network blocking
+ }
+
// If an outbound packet is going to be dropped, we do not count that traffic.
if (egress.egress && (match == DROP)) return DROP;
@@ -509,52 +517,48 @@
return match;
}
-// Tracing on Android U+ 5.8+
-DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace", AID_ROOT, AID_SYSTEM,
- bpf_cgroup_ingress_trace, KVER_5_8, KVER_INF,
- BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
- "fs_bpf_netd_readonly", "",
- LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+// Tracing on Android U+ 5.10+
+DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/ingress/stats$trace",
+ bpf_cgroup_ingress_trace, KVER_5_10, KVER_INF,
+ BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER)
(struct __sk_buff* skb) {
- return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
+ return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_10, SDK_LEVEL_U);
}
-DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
+DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19",
bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
- return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19);
+ return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19, SDK_LEVEL_NONE);
}
-DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
+DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14",
bpf_cgroup_ingress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
- return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE);
+ return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE, SDK_LEVEL_NONE);
}
-// Tracing on Android U+ 5.8+
-DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace", AID_ROOT, AID_SYSTEM,
- bpf_cgroup_egress_trace, KVER_5_8, KVER_INF,
- BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
- "fs_bpf_netd_readonly", "",
- LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+// Tracing on Android U+ 5.10+
+DEFINE_NETD_BPF_PROG_RANGES("cgroupskb/egress/stats$trace",
+ bpf_cgroup_egress_trace, KVER_5_10, KVER_INF,
+ BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER)
(struct __sk_buff* skb) {
- return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
+ return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_10, SDK_LEVEL_U);
}
-DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
+DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19",
bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
- return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19);
+ return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19, SDK_LEVEL_NONE);
}
-DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
+DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14",
bpf_cgroup_egress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
- return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE);
+ return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE, SDK_LEVEL_NONE);
}
// WARNING: Android T's non-updatable netd depends on the name of this program.
-DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
+DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", xt_bpf_egress_prog)
(struct __sk_buff* skb) {
// Clat daemon does not generate new traffic, all its traffic is accounted for already
// on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
@@ -573,7 +577,7 @@
}
// WARNING: Android T's non-updatable netd depends on the name of this program.
-DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
+DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
// Clat daemon traffic is not accounted by virtue of iptables raw prerouting drop rule
// (in clat_raw_PREROUTING chain), which triggers before this (in bw_raw_PREROUTING chain).
@@ -585,7 +589,7 @@
return XTBPF_MATCH;
}
-DEFINE_SYS_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN,
+DEFINE_SYS_BPF_PROG("schedact/ingress/account",
tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
if (is_received_skb(skb)) {
@@ -597,7 +601,7 @@
}
// WARNING: Android T's non-updatable netd depends on the name of this program.
-DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
+DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
uint32_t sock_uid = bpf_get_socket_uid(skb);
if (is_system_uid(sock_uid)) return XTBPF_MATCH;
@@ -616,7 +620,7 @@
}
// WARNING: Android T's non-updatable netd depends on the name of this program.
-DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
+DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
uint32_t sock_uid = bpf_get_socket_uid(skb);
UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
@@ -639,14 +643,12 @@
return permissions ? *permissions : BPF_PERMISSION_INTERNET;
}
-DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet_create", AID_ROOT, AID_ROOT, inet_socket_create,
- KVER_4_14)
+DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet_create", inet_socket_create, KVER_4_14)
(__unused struct bpf_sock* sk) {
return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? BPF_ALLOW : BPF_DISALLOW;
}
-DEFINE_NETD_BPF_PROG_KVER("cgroupsockrelease/inet_release", AID_ROOT, AID_ROOT,
- inet_socket_release, KVER_5_10)
+DEFINE_NETD_BPF_PROG_KVER("cgroupsockrelease/inet_release", inet_socket_release, KVER_5_10)
(struct bpf_sock* sk) {
uint64_t cookie = bpf_get_sk_cookie(sk);
if (cookie) bpf_cookie_tag_map_delete_elem(&cookie);
@@ -699,47 +701,47 @@
return BPF_ALLOW;
}
-DEFINE_NETD_BPF_PROG_KVER("bind4/inet4_bind", AID_ROOT, AID_ROOT, inet4_bind, KVER_4_19)
+DEFINE_NETD_BPF_PROG_KVER("bind4/inet4_bind", inet4_bind, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return block_port(ctx);
}
-DEFINE_NETD_BPF_PROG_KVER("bind6/inet6_bind", AID_ROOT, AID_ROOT, inet6_bind, KVER_4_19)
+DEFINE_NETD_BPF_PROG_KVER("bind6/inet6_bind", inet6_bind, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return block_port(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("connect4/inet4_connect", AID_ROOT, AID_ROOT, inet4_connect, KVER_4_19)
+DEFINE_NETD_V_BPF_PROG_KVER("connect4/inet4_connect", inet4_connect, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return check_localhost(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("connect6/inet6_connect", AID_ROOT, AID_ROOT, inet6_connect, KVER_4_19)
+DEFINE_NETD_V_BPF_PROG_KVER("connect6/inet6_connect", inet6_connect, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return check_localhost(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("recvmsg4/udp4_recvmsg", AID_ROOT, AID_ROOT, udp4_recvmsg, KVER_4_19)
+DEFINE_NETD_V_BPF_PROG_KVER("recvmsg4/udp4_recvmsg", udp4_recvmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return check_localhost(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("recvmsg6/udp6_recvmsg", AID_ROOT, AID_ROOT, udp6_recvmsg, KVER_4_19)
+DEFINE_NETD_V_BPF_PROG_KVER("recvmsg6/udp6_recvmsg", udp6_recvmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return check_localhost(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("sendmsg4/udp4_sendmsg", AID_ROOT, AID_ROOT, udp4_sendmsg, KVER_4_19)
+DEFINE_NETD_V_BPF_PROG_KVER("sendmsg4/udp4_sendmsg", udp4_sendmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return check_localhost(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("sendmsg6/udp6_sendmsg", AID_ROOT, AID_ROOT, udp6_sendmsg, KVER_4_19)
+DEFINE_NETD_V_BPF_PROG_KVER("sendmsg6/udp6_sendmsg", udp6_sendmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
return check_localhost(ctx);
}
-DEFINE_NETD_V_BPF_PROG_KVER("getsockopt/prog", AID_ROOT, AID_ROOT, getsockopt_prog, KVER_5_4)
+DEFINE_NETD_V_BPF_PROG_KVER("getsockopt/prog", getsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
// Tell kernel to return 'original' kernel reply (instead of the bpf modified buffer)
// This is important if the answer is larger than PAGE_SIZE (max size this bpf hook can provide)
@@ -747,7 +749,7 @@
return BPF_ALLOW;
}
-DEFINE_NETD_V_BPF_PROG_KVER("setsockopt/prog", AID_ROOT, AID_ROOT, setsockopt_prog, KVER_5_4)
+DEFINE_NETD_V_BPF_PROG_KVER("setsockopt/prog", setsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
// Tell kernel to use/process original buffer provided by userspace.
// This is important if it is larger than PAGE_SIZE (max size this bpf hook can handle).
diff --git a/bpf/progs/offload.c b/bpf/progs/offload.c
index 631908a..0f23844 100644
--- a/bpf/progs/offload.c
+++ b/bpf/progs/offload.c
@@ -609,27 +609,27 @@
// Full featured (required) implementations for 5.10+ kernels (these are S+ by definition)
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_8", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_downstream4_rawip_5_8, KVER_5_8)
+ sched_cls_tether_downstream4_rawip_5_8, KVER_5_10)
(struct __sk_buff* skb) {
- return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_5_8);
+ return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_5_10);
}
DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_8", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_upstream4_rawip_5_8, KVER_5_8)
+ sched_cls_tether_upstream4_rawip_5_8, KVER_5_10)
(struct __sk_buff* skb) {
- return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_5_8);
+ return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_5_10);
}
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_8", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_downstream4_ether_5_8, KVER_5_8)
+ sched_cls_tether_downstream4_ether_5_8, KVER_5_10)
(struct __sk_buff* skb) {
- return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_5_8);
+ return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_5_10);
}
DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_upstream4_ether_5_8, KVER_5_8)
+ sched_cls_tether_upstream4_ether_5_8, KVER_5_10)
(struct __sk_buff* skb) {
- return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_5_8);
+ return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_5_10);
}
// Full featured (optional) implementations for 4.14-S, 4.19-S & 5.4-S kernels
@@ -638,7 +638,7 @@
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$opt",
AID_ROOT, AID_NETWORK_STACK,
sched_cls_tether_downstream4_rawip_opt,
- KVER_4_14, KVER_5_8)
+ KVER_4_14, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_4_14);
}
@@ -646,7 +646,7 @@
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$opt",
AID_ROOT, AID_NETWORK_STACK,
sched_cls_tether_upstream4_rawip_opt,
- KVER_4_14, KVER_5_8)
+ KVER_4_14, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_4_14);
}
@@ -654,7 +654,7 @@
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$opt",
AID_ROOT, AID_NETWORK_STACK,
sched_cls_tether_downstream4_ether_opt,
- KVER_4_14, KVER_5_8)
+ KVER_4_14, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_4_14);
}
@@ -662,7 +662,7 @@
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
AID_ROOT, AID_NETWORK_STACK,
sched_cls_tether_upstream4_ether_opt,
- KVER_4_14, KVER_5_8)
+ KVER_4_14, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_4_14);
}
@@ -682,13 +682,13 @@
// RAWIP: Required for 5.4-R kernels -- which always support bpf_skb_change_head().
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_downstream4_rawip_5_4, KVER_5_4, KVER_5_8)
+ sched_cls_tether_downstream4_rawip_5_4, KVER_5_4, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_5_4);
}
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_upstream4_rawip_5_4, KVER_5_4, KVER_5_8)
+ sched_cls_tether_upstream4_rawip_5_4, KVER_5_4, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_5_4);
}
@@ -715,13 +715,13 @@
// ETHER: Required for 4.14-Q/R, 4.19-Q/R & 5.4-R kernels.
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$4_14", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_downstream4_ether_4_14, KVER_4_14, KVER_5_8)
+ sched_cls_tether_downstream4_ether_4_14, KVER_4_14, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
}
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", AID_ROOT, AID_NETWORK_STACK,
- sched_cls_tether_upstream4_ether_4_14, KVER_4_14, KVER_5_8)
+ sched_cls_tether_upstream4_ether_4_14, KVER_4_14, KVER_5_10)
(struct __sk_buff* skb) {
return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER_4_14);
}
@@ -805,7 +805,7 @@
}
#define DEFINE_XDP_PROG(str, func) \
- DEFINE_BPF_PROG_KVER(str, AID_ROOT, AID_NETWORK_STACK, func, KVER_5_9)(struct xdp_md *ctx)
+ DEFINE_BPF_PROG_KVER(str, AID_ROOT, AID_NETWORK_STACK, func, KVER_5_10)(struct xdp_md *ctx)
DEFINE_XDP_PROG("xdp/tether_downstream_ether",
xdp_tether_downstream_ether) {
diff --git a/remoteauth/OWNERS b/remoteauth/OWNERS
index 25a32b9..ee46c1c 100644
--- a/remoteauth/OWNERS
+++ b/remoteauth/OWNERS
@@ -2,7 +2,6 @@
# Bug template url: http://b/new?component=1145231&template=1715387
billyhuang@google.com
boetger@google.com
-casbor@google.com
derekjedral@google.com
dlm@google.com
igorzas@google.com
diff --git a/staticlibs/device/com/android/net/module/util/netlink/NetlinkUtils.java b/staticlibs/device/com/android/net/module/util/netlink/NetlinkUtils.java
index e2544d3..0d96fc4 100644
--- a/staticlibs/device/com/android/net/module/util/netlink/NetlinkUtils.java
+++ b/staticlibs/device/com/android/net/module/util/netlink/NetlinkUtils.java
@@ -83,7 +83,7 @@
public static final int INET_DIAG_INFO = 2;
public static final int INET_DIAG_MARK = 15;
- public static final long IO_TIMEOUT_MS = 300L;
+ public static final long IO_TIMEOUT_MS = 3000L;
public static final int DEFAULT_RECV_BUFSIZE = 8 * 1024;
public static final int SOCKET_RECV_BUFSIZE = 64 * 1024;