/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on the Android T Beta 3 bpfloader
#define BPFLOADER_MIN_VER BPFLOADER_T_BETA3_VERSION

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "bpf_shared.h"

// These verdicts are used by the cgroup bpf filter only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0

// These verdicts are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)

// offsetof(struct iphdr, ihl) -- but that's a bitfield
#define IPPROTO_IHL_OFF 0

// This is offsetof(struct tcphdr, "32 bit tcp flag field")
// The tcp flags are after be16 source, dest & be32 seq, ack_seq, hence 12 bytes in.
//
// Note that TCP_FLAG_{ACK,PSH,RST,SYN,FIN} are htonl(0x00{10,08,04,02,01}0000)
// see include/uapi/linux/tcp.h
#define TCP_FLAG32_OFF 12
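// For reference, the start of struct tcphdr (include/uapi/linux/tcp.h) lays out as:
//   __be16 source;    // bytes  0-1
//   __be16 dest;      // bytes  2-3
//   __be32 seq;       // bytes  4-7
//   __be32 ack_seq;   // bytes  8-11
//   <doff/reserved/flags + window 32-bit word>  // bytes 12-15, ie. TCP_FLAG32_OFF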

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "", false)

// For maps netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "", false)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0660)

// Bpf map arrays are preinitialized to 0 at creation and do not support deletion of a key,
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf)
// Additionally on newer kernels the bpf jit can optimize out the lookups.
// only valid indexes are [0..CONFIGURATION_MAP_SIZE-1]
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)
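// Two entries of this array are consulted below: UID_RULES_CONFIGURATION_KEY holds the
// global BpfConfig rule bitmask read by getConfig(), while CURRENT_STATS_MAP_CONFIGURATION_KEY
// selects whether stats are currently written to stats_map_A or stats_map_B.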

// TODO: consider whether we can merge some of these maps
// for example it might be possible to merge 2 or 3 of:
//   uid_counterset_map + uid_owner_map + uid_permission_map
DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

// iptables xt_bpf programs need to be usable by both the netd and netutils_wrappers
// selinux contexts, because even non-xt_bpf iptables mutations are implemented as
// a full table dump, followed by an update in userspace, and then a reload into the kernel,
// where any already in-use xt_bpf matchers are serialized as the path to the pinned
// program (see XT_BPF_MODE_PATH_PINNED), and the iptables binary (or rather
// the kernel acting on behalf of it) must be able to retrieve the pinned program
// for the reload to succeed.
#define DEFINE_XTBPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)

// programs that need to be usable by netd, but not by netutils_wrappers
// (this is because these are currently attached by the mainline provided libnetd_updatable .so
// which is loaded into netd and thus runs as netd uid/gid/selinux context)
#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV, maxKV) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, \
                        minKV, maxKV, false, "fs_bpf_netd_readonly", "")

#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
    DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF)

#define DEFINE_NETD_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE)

// programs that only need to be usable by the system server
#define DEFINE_SYS_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, \
                        KVER_NONE, KVER_INF, false, "fs_bpf_net_shared", "")

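// Note: in all of the wrappers above, the uid/gid arguments and the selinux context string
// (e.g. "fs_bpf_netd_readonly") determine the ownership and labeling of the objects the
// bpfloader pins under the bpffs mount (/sys/fs/bpf), which is what actually gates which
// processes may open the maps and programs defined in this file.
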
static __always_inline int is_system_uid(uint32_t uid) {
    // MIN_SYSTEM_UID is AID_ROOT == 0, so uint32_t is *always* >= 0
    // MAX_SYSTEM_UID is AID_NOBODY == 9999, while AID_APP_START == 10000
    return (uid < AID_APP_START);
}

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using the TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323, titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * All together this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead).
 *
 * Especially since the number of packets is important for any future clat offload correction
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion).
 */
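// Worked example of the adjustment below (illustrative numbers): a 64000 byte IPv4 TCP GSO
// skb has tcp_overhead = 20 (iphdr) + 20 (tcphdr) + 12 (timestamp option) = 52 bytes,
// so mss = 1500 - 52 = 1448 and payload = 64000 - 52 = 63948.  That yields
// packets = ceil(63948 / 1448) = 45 and bytes = 52 * 45 + 63948 = 66288, ie. the on-wire
// total including per-segment header overhead.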
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                          \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,          \
                                                              int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                            \
        if (!value) {                                                                          \
            StatsValue newValue = {};                                                          \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                    \
            value = bpf_##the_stats_map##_lookup_elem(key);                                    \
        }                                                                                      \
        if (value) {                                                                           \
            const int mtu = 1500;                                                              \
            uint64_t packets = 1;                                                              \
            uint64_t bytes = skb->len;                                                         \
            if (bytes > mtu) {                                                                 \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                           \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));   \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                   \
                int mss = mtu - tcp_overhead;                                                  \
                uint64_t payload = bytes - tcp_overhead;                                       \
                packets = (payload + mss - 1) / mss;                                           \
                bytes = tcp_overhead * packets + payload;                                      \
            }                                                                                  \
            if (direction == BPF_EGRESS) {                                                     \
                __sync_fetch_and_add(&value->txPackets, packets);                              \
                __sync_fetch_and_add(&value->txBytes, bytes);                                  \
            } else if (direction == BPF_INGRESS) {                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                              \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                  \
            }                                                                                  \
        }                                                                                      \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)
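// Each instantiation above expands to an update_<the_stats_map>() helper used below, e.g.
// update_app_uid_stats_map(skb, direction, &uid).  The lookup / BPF_NOEXIST insert /
// re-lookup sequence in the macro makes creating a missing entry safe against concurrent
// CPUs: if another CPU wins the race, the BPF_NOEXIST insert fails and the second lookup
// still finds the winner's entry.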

// both of these return 0 on success or -EFAULT on failure (and zero out the buffer)
static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* skb, int off,
                                                         void* to, int len, bool is_4_19) {
    return is_4_19
           ? bpf_skb_load_bytes_relative(skb, off, to, len, BPF_HDR_START_NET)
           : bpf_skb_load_bytes(skb, off, to, len);
}
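// Note: bpf_skb_load_bytes_relative() with BPF_HDR_START_NET reads at an offset relative to
// the network (L3) header regardless of any preceding L2 header, but it is only available on
// newer kernels (hence the 4.19 gate above).  The plain bpf_skb_load_bytes() fallback reads
// from the start of skb->data, which for these cgroup skb programs is already the L3 header.
//
// skip_owner_match() below exempts two kinds of packets from the per-uid owner match:
// ESP (no attributable per-socket ownership) and TCP RSTs, which the kernel can generate
// itself with no owning socket or app.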

static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, bool is_4_19) {
    uint32_t flag = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP_PROTO_OFF, &proto, sizeof(proto), is_4_19);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        uint8_t ihl;
        // We don't check for success: this read cannot fail, since it is earlier in the packet
        // than proto, the read of which must have succeeded.  Additionally, even with ihl
        // zeroed, the next read (of the tcp flags field, a little bit deeper in the packet)
        // would also fail, and that failure we already handle correctly.
        // (We also don't check that ihl is in [0x45,0x4F] nor that the ipv4 header checksum
        // is correct.)
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), is_4_19);
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, (ihl & 0xF) * 4 + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), is_4_19);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IPV6_PROTO_OFF, &proto, sizeof(proto), is_4_19);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, sizeof(struct ipv6hdr) + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), is_4_19);
    } else {
        return false;
    }
    return flag & TCP_FLAG_RST;  // false on read failure
}

static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

// DROP_IF_SET is the set of rules that should DROP if the rule is globally enabled
// and the per-uid bit is set
#define DROP_IF_SET (STANDBY_MATCH | OEM_DENY_1_MATCH | OEM_DENY_2_MATCH | OEM_DENY_3_MATCH)
// DROP_IF_UNSET is the set of rules that should DROP if the rule is globally enabled
// and the per-uid bit is NOT set
#define DROP_IF_UNSET (DOZABLE_MATCH | POWERSAVE_MATCH | RESTRICTED_MATCH | LOW_POWER_STANDBY_MATCH)

static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
                                                  int direction, bool is_4_19) {
    if (skip_owner_match(skb, is_4_19)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    // Warning: funky bit-wise arithmetic: in parallel, for all DROP_IF_SET/UNSET rules
    // check whether the rules are globally enabled, and if so whether the rules are
    // set/unset for the specific uid.  BPF_DROP if that is the case for ANY of the rules.
    // We achieve this by masking out only the bits/rules we're interested in checking,
    // and negating (via bit-wise xor) the bits/rules that should drop if unset.
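    // Worked example (illustrative): if DOZABLE_MATCH (a drop-if-unset rule) is globally
    // enabled but unset in uidRules, the xor flips that bit to 1, it survives both masks,
    // and we return BPF_DROP.  For STANDBY_MATCH (a drop-if-set rule) the xor leaves the
    // uid's bit untouched, so a set bit plus global enablement likewise drops.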
    if (enabledRules & (DROP_IF_SET | DROP_IF_UNSET) & (uidRules ^ DROP_IF_UNSET)) return BPF_DROP;

    if (direction == BPF_INGRESS && skb->ifindex != 1) {
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drops packets not coming from lo or the allowed interface.
                // allowed_iif == 0 is a wildcard and does not drop packets.
                return BPF_DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drops packets not coming from lo when the rule has LOCKDOWN_VPN_MATCH
            // but not IIF_MATCH.
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}
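// Note: BPF_DROP_UNLESS_DNS is not a final verdict: bpf_traffic_account() below converts it
// to BPF_PASS for the system DNS resolver's own traffic (TAG_SYSTEM_DNS + AID_DNS) and to
// BPF_DROP for everything else.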

static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint32_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction,
                                                      bool is_4_19) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    if (sock_uid == AID_CLAT || uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction, is_4_19);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that traffic.
        return match;
    }

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    // Use asm("%0 &= 1" : "+r"(match)) before return match,
    // to help the kernel's bpf verifier, so that it can be 100% certain
    // that the returned value is always BPF_NOMATCH(0) or BPF_MATCH(1).
    if (!selectedMap) {
        asm("%0 &= 1" : "+r"(match));
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    asm("%0 &= 1" : "+r"(match));
    return match;
}

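// The $4_19 / $4_14 suffixes below denote duplicate programs compiled for different kernel
// version ranges; the bpfloader is expected to load only the variant whose [minKV, maxKV)
// range matches the running kernel, so exactly one of each pair ends up attached.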
DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_19, KVER(4, 19, 0), KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS, /* is_4_19 */ true);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER(4, 19, 0))
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS, /* is_4_19 */ false);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_19, KVER(4, 19, 0), KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS, /* is_4_19 */ true);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_14, KVER_NONE, KVER(4, 19, 0))
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS, /* is_4_19 */ false);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // Clat daemon does not generate new traffic; all its traffic is already accounted for
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted for here, by virtue of the iptables raw prerouting
    // drop rule (in the clat_raw_PREROUTING chain), which triggers before this program (in the
    // bw_raw_PREROUTING chain). It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_SYS_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN,
                    tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, BPF_INGRESS, &key);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid, usually this being returned means
    // that skb->sk is NULL during RX (early decap socket lookup failure),
    // which commonly happens for incoming packets to an unconnected udp socket.
    // Additionally bpf_get_socket_cookie() returns 0 if skb->sk is NULL
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                          KVER(4, 14, 0))
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to the app for all
     * users at install time, so we only check the appId part of the requesting uid
     * at run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity and netd");