blob: 836e99833f28a7714f7aad046973f4cf1ca4ff55 [file] [log] [blame]
Ken Chen335c0d42021-10-23 11:35:26 +08001/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#pragma once
18
19#include <linux/if.h>
20#include <linux/if_ether.h>
21#include <linux/in.h>
22#include <linux/in6.h>
Ken Chen335c0d42021-10-23 11:35:26 +080023
Maciej Żenczykowski12057372022-06-14 14:36:34 -070024#ifdef __cplusplus
25#include <string_view>
26#include "XtBpfProgLocations.h"
27#endif
28
// This header file is shared by eBPF kernel programs (C) and netd (C++) and
// some of the maps are also accessed directly from Java mainline module code.
//
// Hence: explicitly pad all relevant structures and assert that their size
// is the sum of the sizes of their fields.
// Compile-time check that a struct's size matches its declared ABI size.
// (Deliberately #undef'd at the bottom of this header so it does not leak.)
#define STRUCT_SIZE(name, size) _Static_assert(sizeof(name) == (size), "Incorrect struct size.")
35
// {uid, tag} pair associated with a socket; the value type of the
// cookie_tag_map described in the sizing comment below (8-byte key/value).
typedef struct {
    uint32_t uid;  // owning uid of the socket
    uint32_t tag;  // traffic tag applied to the socket
} UidTagValue;
STRUCT_SIZE(UidTagValue, 2 * 4);  // 8
41
// Key of the per-uid/tag stats maps (stats_map_A/B below use a 16-byte key,
// matching this layout).
typedef struct {
    uint32_t uid;         // uid the traffic is accounted to
    uint32_t tag;         // traffic tag (0 for untagged)
    uint32_t counterSet;  // counter set index (see uid_counterset_map)
    uint32_t ifaceIndex;  // interface the traffic was seen on
} StatsKey;
STRUCT_SIZE(StatsKey, 4 * 4);  // 16
49
// Packet/byte counters for one stats-map entry, split by direction.
typedef struct {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
} StatsValue;
STRUCT_SIZE(StatsValue, 4 * 8);  // 32
57
Maciej Żenczykowskia693bac2023-08-13 07:30:15 +000058#ifdef __cplusplus
59static inline StatsValue& operator+=(StatsValue& lhs, const StatsValue& rhs) {
60 lhs.rxPackets += rhs.rxPackets;
61 lhs.rxBytes += rhs.rxBytes;
62 lhs.txPackets += rhs.txPackets;
63 lhs.txBytes += rhs.txBytes;
64 return lhs;
65}
66#endif
67
// Interface name, fixed IFNAMSIZ (16) byte buffer; value type of the
// iface_index_name_map (ifindex -> name).
typedef struct {
    char name[IFNAMSIZ];
} IfaceValue;
STRUCT_SIZE(IfaceValue, 16);
72
// One record per traced packet; pushed to userspace (presumably via the
// packet_trace ringbuf — see PACKET_TRACE_RINGBUF_PATH / PACKET_TRACE_BUF_SIZE
// below; confirm against the tracing BPF program).
typedef struct {
    uint64_t timestampNs;  // capture timestamp, nanoseconds
    uint32_t ifindex;      // interface the packet was seen on
    uint32_t length;       // packet length in bytes

    uint32_t uid;          // uid the packet is attributed to
    uint32_t tag;          // traffic tag of the owning socket

    __be16 sport;          // source port, big-endian
    __be16 dport;          // destination port, big-endian

    bool egress;           // direction flag (true = egress — verify vs producer)
    uint8_t ipProto;       // IP protocol number (IPPROTO_*)
    uint8_t tcpFlags;      // raw TCP flags byte; meaning for non-TCP not defined here
    uint8_t ipVersion;     // 4=IPv4, 6=IPv6, 0=unknown
} PacketTrace;
STRUCT_SIZE(PacketTrace, 8+4+4 + 4+4 + 2+2 + 1+1+1+1);  // 28
90
// Since we cannot garbage collect the stats maps after device boot, we need to make these maps as
// large as possible. The maximum number of map entries we can have depends on the rlimit
// of MEM_LOCK granted to netd. The memory space needed by each map can be calculated by the
// following formula:
95// elem_size = 40 + roundup(key_size, 8) + roundup(value_size, 8)
96// cost = roundup_pow_of_two(max_entries) * 16 + elem_size * max_entries +
97// elem_size * number_of_CPU
// And the cost of each map currently used is (assuming the device has 8 CPUs):
99// cookie_tag_map: key: 8 bytes, value: 8 bytes, cost: 822592 bytes = 823Kbytes
100// uid_counter_set_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
101// app_uid_stats_map: key: 4 bytes, value: 32 bytes, cost: 1062784 bytes = 1063Kbytes
102// uid_stats_map: key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
103// tag_stats_map: key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
104// iface_index_name_map:key: 4 bytes, value: 16 bytes, cost: 80896 bytes = 81Kbytes
105// iface_stats_map: key: 4 bytes, value: 32 bytes, cost: 97024 bytes = 97Kbytes
106// dozable_uid_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
107// standby_uid_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
108// powersave_uid_map: key: 4 bytes, value: 1 bytes, cost: 145216 bytes = 145Kbytes
Ryan Zuklie9419d252023-01-20 17:03:56 -0800109// packet_trace_ringbuf:key: 0 bytes, value: 24 bytes, cost: 32768 bytes = 32Kbytes
110// total: 4962Kbytes
// It takes at most 4.9MB of kernel memory if all maps are full, which requires any device
// running this module to have a memlock rlimit larger than 5MB. The old qtaguid module
// had no total limit on data entries, only a limit on the number of tags each uid may
// have (default is 1024 in kernel).
115
// Maximum entry counts for each map (see the memory-cost analysis above).
// 'static' - otherwise these constants end up in .rodata in the resulting .o post compilation
static const int COOKIE_UID_MAP_SIZE = 10000;
static const int UID_COUNTERSET_MAP_SIZE = 4000;
static const int APP_STATS_MAP_SIZE = 10000;
static const int STATS_MAP_SIZE = 5000;
static const int IFACE_INDEX_NAME_MAP_SIZE = 1000;
static const int IFACE_STATS_MAP_SIZE = 1000;
static const int CONFIGURATION_MAP_SIZE = 2;
static const int UID_OWNER_MAP_SIZE = 4000;
static const int INGRESS_DISCARD_MAP_SIZE = 100;
static const int PACKET_TRACE_BUF_SIZE = 32 * 1024;  // ringbuf size in bytes, not entries
Ken Chen335c0d42021-10-23 11:35:26 +0800127
#ifdef __cplusplus

// Root of the bpffs directory where all netd-shared programs and maps are pinned.
#define BPF_NETD_PATH "/sys/fs/bpf/netd_shared/"

// cgroup skb programs that account egress/ingress traffic stats.
#define BPF_EGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_egress_stats"
#define BPF_INGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_ingress_stats"

// Compile-time string equality check (C++17 constexpr std::string_view comparison),
// used below to pin paths to the values hard-coded in Android T's netd binary.
#define ASSERT_STRING_EQUAL(s1, s2) \
    static_assert(std::string_view(s1) == std::string_view(s2), "mismatch vs Android T netd")

/* -=-=-=-=- WARNING -=-=-=-=-
 *
 * These 4 xt_bpf program paths are actually defined by:
 * //system/netd/include/mainline/XtBpfProgLocations.h
 * which is intentionally a non-automerged location.
 *
 * They are *UNCHANGEABLE* due to being hard coded in Android T's netd binary
 * as such we have compile time asserts that things match.
 * (which will be validated during build on mainline-prod branch against old system/netd)
 *
 * If you break this, netd on T will fail to start with your tethering mainline module.
 */
ASSERT_STRING_EQUAL(XT_BPF_INGRESS_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_ingress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_EGRESS_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_egress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_ALLOWLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_allowlist_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_DENYLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_denylist_xtbpf");

// cgroup/sock program run at inet socket creation.
#define CGROUP_SOCKET_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupsock_inet_create"

// tc (sched_act) ingress accounting program; name is exposed separately from the path.
#define TC_BPF_INGRESS_ACCOUNT_PROG_NAME "prog_netd_schedact_ingress_account"
#define TC_BPF_INGRESS_ACCOUNT_PROG_PATH BPF_NETD_PATH TC_BPF_INGRESS_ACCOUNT_PROG_NAME

// Pinned map locations; names correspond to the maps described in the
// sizing comment near the top of this header.
#define COOKIE_TAG_MAP_PATH BPF_NETD_PATH "map_netd_cookie_tag_map"
#define UID_COUNTERSET_MAP_PATH BPF_NETD_PATH "map_netd_uid_counterset_map"
#define APP_UID_STATS_MAP_PATH BPF_NETD_PATH "map_netd_app_uid_stats_map"
#define STATS_MAP_A_PATH BPF_NETD_PATH "map_netd_stats_map_A"
#define STATS_MAP_B_PATH BPF_NETD_PATH "map_netd_stats_map_B"
#define IFACE_INDEX_NAME_MAP_PATH BPF_NETD_PATH "map_netd_iface_index_name_map"
#define IFACE_STATS_MAP_PATH BPF_NETD_PATH "map_netd_iface_stats_map"
#define CONFIGURATION_MAP_PATH BPF_NETD_PATH "map_netd_configuration_map"
#define UID_OWNER_MAP_PATH BPF_NETD_PATH "map_netd_uid_owner_map"
#define UID_PERMISSION_MAP_PATH BPF_NETD_PATH "map_netd_uid_permission_map"
#define INGRESS_DISCARD_MAP_PATH BPF_NETD_PATH "map_netd_ingress_discard_map"
#define PACKET_TRACE_RINGBUF_PATH BPF_NETD_PATH "map_netd_packet_trace_ringbuf"
#define PACKET_TRACE_ENABLED_MAP_PATH BPF_NETD_PATH "map_netd_packet_trace_enabled_map"

#endif // __cplusplus
175
// LINT.IfChange(match_type)
// Bit flags recorded per uid in the uid owner map ('rule' field of
// UidOwnerValue below); a uid's entry ORs together every rule that applies.
// Values are mirrored in BpfNetMaps.java — see the LINT.ThenChange below.
enum UidOwnerMatchType {
    NO_MATCH = 0,
    HAPPY_BOX_MATCH = (1 << 0),
    PENALTY_BOX_MATCH = (1 << 1),
    DOZABLE_MATCH = (1 << 2),
    STANDBY_MATCH = (1 << 3),
    POWERSAVE_MATCH = (1 << 4),
    RESTRICTED_MATCH = (1 << 5),
    LOW_POWER_STANDBY_MATCH = (1 << 6),
    IIF_MATCH = (1 << 7),  // only match on the iif in UidOwnerValue
    LOCKDOWN_VPN_MATCH = (1 << 8),
    OEM_DENY_1_MATCH = (1 << 9),
    OEM_DENY_2_MATCH = (1 << 10),
    OEM_DENY_3_MATCH = (1 << 11),
};
// LINT.ThenChange(packages/modules/Connectivity/service/src/com/android/server/BpfNetMaps.java)
Ken Chen335c0d42021-10-23 11:35:26 +0800193
// Permission bits stored in the uid permission map. NOTE(review): values
// start at 1 << 2 — bits 0/1 are presumably reserved or used elsewhere;
// confirm against the uid_permission_map writers in netd.
enum BpfPermissionMatch {
    BPF_PERMISSION_INTERNET = 1 << 2,
    BPF_PERMISSION_UPDATE_DEVICE_STATS = 1 << 3,
};
// In production we use two identical stats maps to record per uid stats and
// do swap and clean based on the configuration specified here. The statsMapType
// value in the configuration map specifies which map is currently in use.
enum StatsMapType {
    SELECT_MAP_A,  // stats_map_A is the live map
    SELECT_MAP_B,  // stats_map_B is the live map
};
205
// TODO: change the configuration object from a bitmask to an object with clearer
// semantics, like a struct.
// Bitmask of enabled UID rules, stored in the configuration map under
// UID_RULES_CONFIGURATION_KEY (defined below).
typedef uint32_t BpfConfig;
static const BpfConfig DEFAULT_CONFIG = 0;  // all bits clear: no rules enabled
210
// Per-uid entry of the uid owner map: which match rules apply to the uid,
// plus the interface restriction used by IIF_MATCH.
typedef struct {
    // Allowed interface index. Only applicable if IIF_MATCH is set in the rule bitmask above.
    uint32_t iif;
    // A bitmask of enum values in UidOwnerMatchType.
    uint32_t rule;
} UidOwnerValue;
STRUCT_SIZE(UidOwnerValue, 2 * 4);  // 8
218
// Key of the ingress discard map: destination address of the incoming packet.
typedef struct {
    // The destination ip of the incoming packet. IPv4 uses IPv4-mapped IPv6 address format.
    struct in6_addr daddr;
} IngressDiscardKey;
STRUCT_SIZE(IngressDiscardKey, 16);  // 16 == sizeof(struct in6_addr)
224
// Value of the ingress discard map: the interfaces on which the destination
// address is permitted (packets arriving elsewhere are dropped).
typedef struct {
    // Allowed interface indexes. Use same value multiple times if you just want to match 1 value.
    uint32_t iif[2];
} IngressDiscardValue;
STRUCT_SIZE(IngressDiscardValue, 2 * 4);  // 8
230
// Keys into the 2-entry configuration map (CONFIGURATION_MAP_SIZE == 2).
// Entry in the configuration map that stores which UID rules are enabled.
#define UID_RULES_CONFIGURATION_KEY 0
// Entry in the configuration map that stores which stats map is currently in use.
#define CURRENT_STATS_MAP_CONFIGURATION_KEY 1

// STRUCT_SIZE is an implementation detail of this header; don't leak it to includers.
#undef STRUCT_SIZE