/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>

#ifdef __cplusplus
#include <string_view>
#include "XtBpfProgLocations.h"
#endif

// This header file is shared by eBPF kernel programs (C) and netd (C++) and
// some of the maps are also accessed directly from Java mainline module code.
//
// Hence: explicitly pad all relevant structures and assert that their size
// is the sum of the sizes of their fields.
#define STRUCT_SIZE(name, size) _Static_assert(sizeof(name) == (size), "Incorrect struct size.")

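// For example, a struct whose natural layout would otherwise contain implicit
// compiler-inserted padding must spell that padding out, so that C, C++ and the
// Java map accessors all agree on the exact byte layout. (Hypothetical
// illustration only -- not a map type used by this header:)
//
//   typedef struct {
//       uint64_t bytes;
//       uint32_t packets;
//       uint32_t pad;  // explicit, instead of compiler-inserted, tail padding
//   } Example;
//   STRUCT_SIZE(Example, 8 + 4 + 4);  // 16
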
typedef struct {
    uint32_t uid;
    uint32_t tag;
} UidTagValue;
STRUCT_SIZE(UidTagValue, 2 * 4); // 8

typedef struct {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;
} StatsKey;
STRUCT_SIZE(StatsKey, 4 * 4); // 16

typedef struct {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
} StatsValue;
STRUCT_SIZE(StatsValue, 4 * 8); // 32

#ifdef __cplusplus
static inline StatsValue& operator+=(StatsValue& lhs, const StatsValue& rhs) {
    lhs.rxPackets += rhs.rxPackets;
    lhs.rxBytes += rhs.rxBytes;
    lhs.txPackets += rhs.txPackets;
    lhs.txBytes += rhs.txBytes;
    return lhs;
}
#endif

typedef struct {
    char name[IFNAMSIZ];
} IfaceValue;
STRUCT_SIZE(IfaceValue, 16);

typedef struct {
    uint64_t timestampNs;
    uint32_t ifindex;
    uint32_t length;

    uint32_t uid;
    uint32_t tag;

    __be16 sport;
    __be16 dport;

    bool egress:1,
         wakeup:1;
    uint8_t ipProto;
    uint8_t tcpFlags;
    uint8_t ipVersion; // 4=IPv4, 6=IPv6, 0=unknown
} PacketTrace;
STRUCT_SIZE(PacketTrace, 8+4+4 + 4+4 + 2+2 + 1+1+1+1); // 32

// Since we cannot garbage collect the stats maps after device boot, we need to make these maps as
// large as possible. The maximum number of map entries we can have depends on the rlimit of
// MEM_LOCK granted to netd. The memory space needed by each map can be calculated with the
// following formula:
//    elem_size = 40 + roundup(key_size, 8) + roundup(value_size, 8)
//    cost = roundup_pow_of_two(max_entries) * 16 + elem_size * max_entries +
//           elem_size * number_of_CPU
// The cost of each map currently used is (assuming the device has 8 CPUs):
// cookie_tag_map:      key:  8 bytes, value:  8 bytes, cost:  822592 bytes =  823Kbytes
// uid_counter_set_map: key:  4 bytes, value:  1 bytes, cost:  145216 bytes =  145Kbytes
// app_uid_stats_map:   key:  4 bytes, value: 32 bytes, cost: 1062784 bytes = 1063Kbytes
// uid_stats_map:       key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
// tag_stats_map:       key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
// iface_index_name_map:key:  4 bytes, value: 16 bytes, cost:   80896 bytes =   81Kbytes
// iface_stats_map:     key:  4 bytes, value: 32 bytes, cost:   97024 bytes =   97Kbytes
// dozable_uid_map:     key:  4 bytes, value:  1 bytes, cost:  145216 bytes =  145Kbytes
// standby_uid_map:     key:  4 bytes, value:  1 bytes, cost:  145216 bytes =  145Kbytes
// powersave_uid_map:   key:  4 bytes, value:  1 bytes, cost:  145216 bytes =  145Kbytes
// packet_trace_ringbuf:key:  0 bytes, value: 24 bytes, cost:   32768 bytes =   32Kbytes
// total: 4962Kbytes
// It takes at most 4.9MB of kernel memory if all maps are full, which requires any device
// running this module to have a memlock rlimit larger than 5MB. In the old qtaguid module,
// we didn't have a total limit on data entries, only a limit on the number of tags each uid
// could have (default 1024 in the kernel).

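// As a worked check of the formula above (a sanity check, not new policy), take
// cookie_tag_map with key_size = 8, value_size = 8 and max_entries = 10000:
//    elem_size = 40 + roundup(8, 8) + roundup(8, 8) = 56
//    cost = roundup_pow_of_two(10000) * 16 + 56 * 10000 + 56 * 8
//         = 16384 * 16 + 560000 + 448 = 822592 bytes
// which matches the 823Kbytes figure in the table above.
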
// 'static' - otherwise these constants end up in .rodata in the resulting .o post compilation
static const int COOKIE_UID_MAP_SIZE = 10000;
static const int UID_COUNTERSET_MAP_SIZE = 4000;
static const int APP_STATS_MAP_SIZE = 10000;
static const int STATS_MAP_SIZE = 5000;
static const int IFACE_INDEX_NAME_MAP_SIZE = 1000;
static const int IFACE_STATS_MAP_SIZE = 1000;
static const int CONFIGURATION_MAP_SIZE = 2;
static const int UID_OWNER_MAP_SIZE = 4000;
static const int INGRESS_DISCARD_MAP_SIZE = 100;
static const int PACKET_TRACE_BUF_SIZE = 32 * 1024;

#ifdef __cplusplus

#define BPF_NETD_PATH "/sys/fs/bpf/netd_shared/"

#define BPF_EGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_egress_stats"
#define BPF_INGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_ingress_stats"

#define ASSERT_STRING_EQUAL(s1, s2) \
    static_assert(std::string_view(s1) == std::string_view(s2), "mismatch vs Android T netd")

/* -=-=-=-=- WARNING -=-=-=-=-
 *
 * These 4 xt_bpf program paths are actually defined by:
 *   //system/netd/include/mainline/XtBpfProgLocations.h
 * which is intentionally a non-automerged location.
 *
 * They are *UNCHANGEABLE* because they are hard coded in Android T's netd binary;
 * as such we have compile-time asserts that things match
 * (which will be validated during build on the mainline-prod branch against old system/netd).
 *
 * If you break this, netd on T will fail to start with your tethering mainline module.
 */
ASSERT_STRING_EQUAL(XT_BPF_INGRESS_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_ingress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_EGRESS_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_egress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_ALLOWLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_allowlist_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_DENYLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_denylist_xtbpf");

#define CGROUP_SOCKET_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupsock_inet_create"

#define TC_BPF_INGRESS_ACCOUNT_PROG_NAME "prog_netd_schedact_ingress_account"
#define TC_BPF_INGRESS_ACCOUNT_PROG_PATH BPF_NETD_PATH TC_BPF_INGRESS_ACCOUNT_PROG_NAME

#define COOKIE_TAG_MAP_PATH BPF_NETD_PATH "map_netd_cookie_tag_map"
#define UID_COUNTERSET_MAP_PATH BPF_NETD_PATH "map_netd_uid_counterset_map"
#define APP_UID_STATS_MAP_PATH BPF_NETD_PATH "map_netd_app_uid_stats_map"
#define STATS_MAP_A_PATH BPF_NETD_PATH "map_netd_stats_map_A"
#define STATS_MAP_B_PATH BPF_NETD_PATH "map_netd_stats_map_B"
#define IFACE_INDEX_NAME_MAP_PATH BPF_NETD_PATH "map_netd_iface_index_name_map"
#define IFACE_STATS_MAP_PATH BPF_NETD_PATH "map_netd_iface_stats_map"
#define CONFIGURATION_MAP_PATH BPF_NETD_PATH "map_netd_configuration_map"
#define UID_OWNER_MAP_PATH BPF_NETD_PATH "map_netd_uid_owner_map"
#define UID_PERMISSION_MAP_PATH BPF_NETD_PATH "map_netd_uid_permission_map"
#define INGRESS_DISCARD_MAP_PATH BPF_NETD_PATH "map_netd_ingress_discard_map"
#define PACKET_TRACE_RINGBUF_PATH BPF_NETD_PATH "map_netd_packet_trace_ringbuf"
#define PACKET_TRACE_ENABLED_MAP_PATH BPF_NETD_PATH "map_netd_packet_trace_enabled_map"

#endif // __cplusplus

// LINT.IfChange(match_type)
enum UidOwnerMatchType {
    NO_MATCH = 0,
    HAPPY_BOX_MATCH = (1 << 0),
    PENALTY_BOX_MATCH = (1 << 1),
    DOZABLE_MATCH = (1 << 2),
    STANDBY_MATCH = (1 << 3),
    POWERSAVE_MATCH = (1 << 4),
    RESTRICTED_MATCH = (1 << 5),
    LOW_POWER_STANDBY_MATCH = (1 << 6),
    IIF_MATCH = (1 << 7),
    LOCKDOWN_VPN_MATCH = (1 << 8),
    OEM_DENY_1_MATCH = (1 << 9),
    OEM_DENY_2_MATCH = (1 << 10),
    OEM_DENY_3_MATCH = (1 << 11),
};
// LINT.ThenChange(packages/modules/Connectivity/service/src/com/android/server/BpfNetMaps.java)

enum BpfPermissionMatch {
    BPF_PERMISSION_INTERNET = 1 << 2,
    BPF_PERMISSION_UPDATE_DEVICE_STATS = 1 << 3,
};
// In production we use two identical stats maps to record per-uid stats, and
// swap and clean them based on the configuration specified here. The statsMapType
// value in the configuration map specifies which map is currently in use.
enum StatsMapType {
    SELECT_MAP_A,
    SELECT_MAP_B,
};
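
// A minimal C++ sketch (illustrative only, not part of this header) of how a
// userspace reader might pick the active stats map: look up
// CURRENT_STATS_MAP_CONFIGURATION_KEY (defined below) in the pinned configuration
// map, then open map A or B. Assumes libbpf-style bpf_obj_get() /
// bpf_map_lookup_elem() wrappers are available to the caller.
//
//   uint32_t key = CURRENT_STATS_MAP_CONFIGURATION_KEY;
//   uint32_t which = SELECT_MAP_A;
//   int cfgFd = bpf_obj_get(CONFIGURATION_MAP_PATH);
//   if (cfgFd >= 0) bpf_map_lookup_elem(cfgFd, &key, &which);
//   int statsFd = bpf_obj_get(which == SELECT_MAP_A ? STATS_MAP_A_PATH
//                                                   : STATS_MAP_B_PATH);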

// TODO: change the configuration object from a bitmask to an object with clearer
// semantics, like a struct.
typedef uint32_t BpfConfig;
static const BpfConfig DEFAULT_CONFIG = 0;

typedef struct {
    // Allowed interface index. Only applicable if IIF_MATCH is set in the rule bitmask above.
    uint32_t iif;
    // A bitmask of enum values in UidOwnerMatchType.
    uint32_t rule;
} UidOwnerValue;
STRUCT_SIZE(UidOwnerValue, 2 * 4); // 8

typedef struct {
    // The destination IP of the incoming packet. IPv4 addresses use the
    // IPv4-mapped IPv6 address format.
    struct in6_addr daddr;
} IngressDiscardKey;
STRUCT_SIZE(IngressDiscardKey, 16); // 16

typedef struct {
    // Allowed interface indexes. Use the same value twice if you only want to match one ifindex.
    uint32_t iif[2];
} IngressDiscardValue;
STRUCT_SIZE(IngressDiscardValue, 2 * 4); // 8
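
// Illustrative sketch (not part of this header): building an IngressDiscardKey
// for an IPv4 destination, using the ::ffff:a.b.c.d mapped form noted above.
// ipv4_daddr is assumed to be a network-byte-order uint32_t.
//
//   IngressDiscardKey k = {};
//   k.daddr.s6_addr[10] = 0xff;  // bytes 0..9 stay zero
//   k.daddr.s6_addr[11] = 0xff;
//   memcpy(&k.daddr.s6_addr[12], &ipv4_daddr, 4);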

// Entry in the configuration map that stores which UID rules are enabled.
#define UID_RULES_CONFIGURATION_KEY 0
// Entry in the configuration map that stores which stats map is currently in use.
#define CURRENT_STATS_MAP_CONFIGURATION_KEY 1

#undef STRUCT_SIZE

// DROP_IF_SET is the set of rules that DROP if the rule is globally enabled and the per-uid bit is set
#define DROP_IF_SET (STANDBY_MATCH | OEM_DENY_1_MATCH | OEM_DENY_2_MATCH | OEM_DENY_3_MATCH)
// DROP_IF_UNSET is the set of rules that DROP if globally enabled and the per-uid bit is NOT set
#define DROP_IF_UNSET (DOZABLE_MATCH | POWERSAVE_MATCH | RESTRICTED_MATCH | LOW_POWER_STANDBY_MATCH)

// Warning: funky bit-wise arithmetic: in parallel, for all DROP_IF_SET/UNSET rules,
// check whether the rules are globally enabled, and if so whether the rules are
// set/unset for the specific uid. DROP if that is the case for ANY of the rules.
// We achieve this by masking out only the bits/rules we're interested in checking,
// and negating (via bit-wise xor) the bits/rules that should drop if unset.
static inline bool isBlockedByUidRules(BpfConfig enabledRules, uint32_t uidRules) {
    return enabledRules & (DROP_IF_SET | DROP_IF_UNSET) & (uidRules ^ DROP_IF_UNSET);
}
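
// Worked example (a sanity check of the expression above, not additional logic):
// suppose only DOZABLE_MATCH (1 << 2) is globally enabled and the uid has no bits set.
//   enabledRules & (DROP_IF_SET | DROP_IF_UNSET)  -> DOZABLE_MATCH
//   uidRules ^ DROP_IF_UNSET                      -> DOZABLE_MATCH | POWERSAVE_MATCH | ...
//   AND of the two                                -> DOZABLE_MATCH (non-zero) => blocked
// If instead the uid is on the dozable allowlist (uidRules has DOZABLE_MATCH set),
// the xor clears that bit, the AND yields 0, and the uid is not blocked.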