blob: f960dcfc9b486973cb342379badc22ba969dd8a5 [file] [log] [blame]
Ken Chen335c0d42021-10-23 11:35:26 +08001/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#pragma once
18
Ken Chen784696f2023-10-27 20:33:55 +080019#include <cutils/android_filesystem_config.h>
Ken Chen335c0d42021-10-23 11:35:26 +080020#include <linux/if.h>
21#include <linux/if_ether.h>
22#include <linux/in.h>
23#include <linux/in6.h>
Ken Chen335c0d42021-10-23 11:35:26 +080024
Maciej Żenczykowski12057372022-06-14 14:36:34 -070025#ifdef __cplusplus
26#include <string_view>
27#include "XtBpfProgLocations.h"
28#endif
29
// This header file is shared by eBPF kernel programs (C) and netd (C++) and
// some of the maps are also accessed directly from Java mainline module code.
//
// Hence: explicitly pad all relevant structures and assert that their size
// is the sum of the sizes of their fields.
// (Helper only; it is #undef'd near the end of this header so it does not
// leak into files that include us.)
#define STRUCT_SIZE(name, size) _Static_assert(sizeof(name) == (size), "Incorrect struct size.")
36
// A (uid, tag) pair. Its 8-byte layout matches the cookie_tag_map value size
// in the memory-cost table below (presumably that map's value type — confirm
// against the map definitions in the bpf programs).
typedef struct {
    uint32_t uid;  // owning uid
    uint32_t tag;  // qtaguid-style traffic tag
} UidTagValue;
STRUCT_SIZE(UidTagValue, 2 * 4);  // 8
42
// Key identifying one per-(uid, tag, counterSet, interface) stats bucket.
typedef struct {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;  // interface index, as in iface_index_name_map below
} StatsKey;
STRUCT_SIZE(StatsKey, 4 * 4);  // 16
50
// Bidirectional packet/byte counters (receive and transmit).
typedef struct {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
} StatsValue;
STRUCT_SIZE(StatsValue, 4 * 8);  // 32
58
Maciej Żenczykowskia693bac2023-08-13 07:30:15 +000059#ifdef __cplusplus
60static inline StatsValue& operator+=(StatsValue& lhs, const StatsValue& rhs) {
61 lhs.rxPackets += rhs.rxPackets;
62 lhs.rxBytes += rhs.rxBytes;
63 lhs.txPackets += rhs.txPackets;
64 lhs.txBytes += rhs.txBytes;
65 return lhs;
66}
67#endif
68
// Fixed-size interface name buffer (IFNAMSIZ bytes; the STRUCT_SIZE below
// pins that to 16).
typedef struct {
    char name[IFNAMSIZ];
} IfaceValue;
STRUCT_SIZE(IfaceValue, 16);
73
// One traced-packet record.
// NOTE(review): appears to be the record type for the packet trace ring
// buffer (see PACKET_TRACE_RINGBUF_PATH below) — confirm against the
// producing bpf program.
typedef struct {
    uint64_t timestampNs;  // event timestamp, nanoseconds (per field name)
    uint32_t ifindex;      // interface index
    uint32_t length;       // packet length

    uint32_t uid;
    uint32_t tag;

    __be16 sport;  // source port, big-endian
    __be16 dport;  // destination port, big-endian

    // Two flags packed into a single byte; counted as one 1-byte field in the
    // STRUCT_SIZE arithmetic below.
    bool egress:1,
         wakeup:1;
    uint8_t ipProto;
    uint8_t tcpFlags;
    uint8_t ipVersion; // 4=IPv4, 6=IPv6, 0=unknown
} PacketTrace;
STRUCT_SIZE(PacketTrace, 8+4+4 + 4+4 + 2+2 + 1+1+1+1);  // 32
92
Ken Chen335c0d42021-10-23 11:35:26 +080093// Since we cannot garbage collect the stats map since device boot, we need to make these maps as
// large as possible. The maximum number of map entries we can have depends on the rlimit
// of MEM_LOCK granted to netd. The memory space needed by each map can be calculated by the
// following formula:
97// elem_size = 40 + roundup(key_size, 8) + roundup(value_size, 8)
98// cost = roundup_pow_of_two(max_entries) * 16 + elem_size * max_entries +
99// elem_size * number_of_CPU
100// And the cost of each map currently used is(assume the device have 8 CPUs):
101// cookie_tag_map: key: 8 bytes, value: 8 bytes, cost: 822592 bytes = 823Kbytes
102// uid_counter_set_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
103// app_uid_stats_map: key: 4 bytes, value: 32 bytes, cost: 1062784 bytes = 1063Kbytes
104// uid_stats_map: key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
105// tag_stats_map: key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
106// iface_index_name_map:key: 4 bytes, value: 16 bytes, cost: 80896 bytes = 81Kbytes
107// iface_stats_map: key: 4 bytes, value: 32 bytes, cost: 97024 bytes = 97Kbytes
108// dozable_uid_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
109// standby_uid_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
110// powersave_uid_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
Ryan Zuklie9419d252023-01-20 17:03:56 -0800111// packet_trace_ringbuf:key: 0 bytes, value: 24 bytes, cost: 32768 bytes = 32Kbytes
112// total: 4962Kbytes
// It takes at most 4.9MB of kernel memory space if all maps are full, which requires any device
// running this module to have a memlock rlimit larger than 5MB. In the old qtaguid module,
115// we don't have a total limit for data entries but only have limitation of tags each uid can have.
116// (default is 1024 in kernel);
117
// 'static' - otherwise these constants end up in .rodata in the resulting .o post compilation
// Maximum entry counts for the maps (PACKET_TRACE_BUF_SIZE is the ring buffer
// size in bytes — it matches the 32768-byte cost in the table above).
static const int COOKIE_UID_MAP_SIZE = 10000;
static const int UID_COUNTERSET_MAP_SIZE = 4000;
static const int APP_STATS_MAP_SIZE = 10000;
static const int STATS_MAP_SIZE = 5000;
static const int IFACE_INDEX_NAME_MAP_SIZE = 1000;
static const int IFACE_STATS_MAP_SIZE = 1000;
static const int CONFIGURATION_MAP_SIZE = 2;
static const int UID_OWNER_MAP_SIZE = 4000;
static const int INGRESS_DISCARD_MAP_SIZE = 100;
static const int PACKET_TRACE_BUF_SIZE = 32 * 1024;
static const int DATA_SAVER_ENABLED_MAP_SIZE = 1;
Ken Chen335c0d42021-10-23 11:35:26 +0800130
#ifdef __cplusplus

// Pinned locations (under the bpf filesystem) of the netd programs and maps.
// C++ (netd / mainline) side only.
#define BPF_NETD_PATH "/sys/fs/bpf/netd_shared/"

#define BPF_EGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_egress_stats"
#define BPF_INGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_ingress_stats"

// Compile-time string equality check (both sides must be constant expressions).
#define ASSERT_STRING_EQUAL(s1, s2) \
    static_assert(std::string_view(s1) == std::string_view(s2), "mismatch vs Android T netd")

/* -=-=-=-=- WARNING -=-=-=-=-
 *
 * These 4 xt_bpf program paths are actually defined by:
 * //system/netd/include/mainline/XtBpfProgLocations.h
 * which is intentionally a non-automerged location.
 *
 * They are *UNCHANGEABLE* due to being hard coded in Android T's netd binary
 * as such we have compile time asserts that things match.
 * (which will be validated during build on mainline-prod branch against old system/netd)
 *
 * If you break this, netd on T will fail to start with your tethering mainline module.
 */
ASSERT_STRING_EQUAL(XT_BPF_INGRESS_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_ingress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_EGRESS_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_egress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_ALLOWLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_allowlist_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_DENYLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_denylist_xtbpf");

#define CGROUP_SOCKET_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupsock_inet_create"

#define TC_BPF_INGRESS_ACCOUNT_PROG_NAME "prog_netd_schedact_ingress_account"
#define TC_BPF_INGRESS_ACCOUNT_PROG_PATH BPF_NETD_PATH TC_BPF_INGRESS_ACCOUNT_PROG_NAME

#define COOKIE_TAG_MAP_PATH BPF_NETD_PATH "map_netd_cookie_tag_map"
#define UID_COUNTERSET_MAP_PATH BPF_NETD_PATH "map_netd_uid_counterset_map"
#define APP_UID_STATS_MAP_PATH BPF_NETD_PATH "map_netd_app_uid_stats_map"
#define STATS_MAP_A_PATH BPF_NETD_PATH "map_netd_stats_map_A"
#define STATS_MAP_B_PATH BPF_NETD_PATH "map_netd_stats_map_B"
#define IFACE_INDEX_NAME_MAP_PATH BPF_NETD_PATH "map_netd_iface_index_name_map"
#define IFACE_STATS_MAP_PATH BPF_NETD_PATH "map_netd_iface_stats_map"
#define CONFIGURATION_MAP_PATH BPF_NETD_PATH "map_netd_configuration_map"
#define UID_OWNER_MAP_PATH BPF_NETD_PATH "map_netd_uid_owner_map"
#define UID_PERMISSION_MAP_PATH BPF_NETD_PATH "map_netd_uid_permission_map"
#define INGRESS_DISCARD_MAP_PATH BPF_NETD_PATH "map_netd_ingress_discard_map"
#define PACKET_TRACE_RINGBUF_PATH BPF_NETD_PATH "map_netd_packet_trace_ringbuf"
#define PACKET_TRACE_ENABLED_MAP_PATH BPF_NETD_PATH "map_netd_packet_trace_enabled_map"
#define DATA_SAVER_ENABLED_MAP_PATH BPF_NETD_PATH "map_netd_data_saver_enabled_map"

#endif  // __cplusplus
179
// Match bits OR-ed together into UidOwnerValue.rule (see struct below).
// Kept in sync with the Java-side constants via the LINT markers.
// LINT.IfChange(match_type)
enum UidOwnerMatchType {
    NO_MATCH = 0,
    HAPPY_BOX_MATCH = (1 << 0),
    PENALTY_BOX_MATCH = (1 << 1),
    DOZABLE_MATCH = (1 << 2),
    STANDBY_MATCH = (1 << 3),
    POWERSAVE_MATCH = (1 << 4),
    RESTRICTED_MATCH = (1 << 5),
    LOW_POWER_STANDBY_MATCH = (1 << 6),
    IIF_MATCH = (1 << 7),
    LOCKDOWN_VPN_MATCH = (1 << 8),
    OEM_DENY_1_MATCH = (1 << 9),
    OEM_DENY_2_MATCH = (1 << 10),
    OEM_DENY_3_MATCH = (1 << 11),
};
// LINT.ThenChange(../framework/src/android/net/BpfNetMapsConstants.java)
Ken Chen335c0d42021-10-23 11:35:26 +0800197
// Per-uid permission bits (note: the values intentionally start at 1 << 2).
// NOTE(review): presumably the value bits of uid_permission_map — confirm
// against the programs that read that map.
enum BpfPermissionMatch {
    BPF_PERMISSION_INTERNET = 1 << 2,
    BPF_PERMISSION_UPDATE_DEVICE_STATS = 1 << 3,
};
// In production we use two identical stats maps to record per uid stats and
// do swap and clean based on the configuration specified here. The statsMapType
// value in the configuration map specifies which map is currently in use.
enum StatsMapType {
    SELECT_MAP_A,
    SELECT_MAP_B,
};
209
// Bitmask of globally enabled UidOwnerMatchType rules
// (consumed by isBlockedByUidRules() below).
// TODO: change the configuration object from a bitmask to an object with clearer
// semantics, like a struct.
typedef uint32_t BpfConfig;
static const BpfConfig DEFAULT_CONFIG = 0;  // no rules enabled
214
// Per-uid firewall state: which match rules apply, and (for IIF_MATCH) which
// single interface is allowed.
typedef struct {
    // Allowed interface index. Only applicable if IIF_MATCH is set in the rule bitmask above.
    uint32_t iif;
    // A bitmask of enum values in UidOwnerMatchType.
    uint32_t rule;
} UidOwnerValue;
STRUCT_SIZE(UidOwnerValue, 2 * 4);  // 8
222
// Key of the ingress discard map (paired with IngressDiscardValue below).
typedef struct {
    // The destination ip of the incoming packet. IPv4 uses IPv4-mapped IPv6 address format.
    struct in6_addr daddr;
} IngressDiscardKey;
STRUCT_SIZE(IngressDiscardKey, 16);  // 16
228
// Value of the ingress discard map: the interfaces on which traffic to the
// keyed destination is still acceptable.
typedef struct {
    // Allowed interface indexes. Use same value multiple times if you just want to match 1 value.
    uint32_t iif[2];
} IngressDiscardValue;
STRUCT_SIZE(IngressDiscardValue, 2 * 4);  // 8
234
// Entry in the configuration map that stores which UID rules are enabled.
#define UID_RULES_CONFIGURATION_KEY 0
// Entry in the configuration map that stores which stats map is currently in use.
#define CURRENT_STATS_MAP_CONFIGURATION_KEY 1

// Don't leak the struct-size helper to files that include this header.
#undef STRUCT_SIZE
Ken Chenf7d23e12023-09-16 16:44:42 +0800241
// DROP_IF_SET is set of rules that DROP if rule is globally enabled, and per-uid bit is set
#define DROP_IF_SET (STANDBY_MATCH | OEM_DENY_1_MATCH | OEM_DENY_2_MATCH | OEM_DENY_3_MATCH)
// DROP_IF_UNSET is set of rules that should DROP if globally enabled, and per-uid bit is NOT set
#define DROP_IF_UNSET (DOZABLE_MATCH | POWERSAVE_MATCH | RESTRICTED_MATCH | LOW_POWER_STANDBY_MATCH)
246
247// Warning: funky bit-wise arithmetic: in parallel, for all DROP_IF_SET/UNSET rules
248// check whether the rules are globally enabled, and if so whether the rules are
249// set/unset for the specific uid. DROP if that is the case for ANY of the rules.
250// We achieve this by masking out only the bits/rules we're interested in checking,
251// and negating (via bit-wise xor) the bits/rules that should drop if unset.
252static inline bool isBlockedByUidRules(BpfConfig enabledRules, uint32_t uidRules) {
253 return enabledRules & (DROP_IF_SET | DROP_IF_UNSET) & (uidRules ^ DROP_IF_UNSET);
254}
Ken Chen784696f2023-10-27 20:33:55 +0800255
256static inline bool is_system_uid(uint32_t uid) {
257 // MIN_SYSTEM_UID is AID_ROOT == 0, so uint32_t is *always* >= 0
258 // MAX_SYSTEM_UID is AID_NOBODY == 9999, while AID_APP_START == 10000
259 return (uid < AID_APP_START);
260}