/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cutils/android_filesystem_config.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <stdbool.h>
#include <stdint.h>

#ifdef __cplusplus
#include <string_view>
#include "XtBpfProgLocations.h"
#endif

// This header file is shared by eBPF kernel programs (C) and netd (C++) and
// some of the maps are also accessed directly from Java mainline module code.
//
// Hence: explicitly pad all relevant structures and assert that their size
// is the sum of the sizes of their fields.
#define STRUCT_SIZE(name, size) _Static_assert(sizeof(name) == (size), "Incorrect struct size.")

typedef struct {
    uint32_t uid;
    uint32_t tag;
} UidTagValue;
STRUCT_SIZE(UidTagValue, 2 * 4);  // 8

typedef struct {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;
} StatsKey;
STRUCT_SIZE(StatsKey, 4 * 4);  // 16

typedef struct {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
} StatsValue;
STRUCT_SIZE(StatsValue, 4 * 8);  // 32

#ifdef __cplusplus
static inline StatsValue& operator+=(StatsValue& lhs, const StatsValue& rhs) {
    lhs.rxPackets += rhs.rxPackets;
    lhs.rxBytes += rhs.rxBytes;
    lhs.txPackets += rhs.txPackets;
    lhs.txBytes += rhs.txBytes;
    return lhs;
}
#endif
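
// Minimal usage sketch of the C++ operator above (illustrative only), e.g. when
// folding several StatsValue readings into a running total:
//   StatsValue total = {};
//   StatsValue sample = {.rxPackets = 1, .rxBytes = 100, .txPackets = 2, .txBytes = 200};
//   total += sample;  // total is now {1, 100, 2, 200}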

typedef struct {
    char name[IFNAMSIZ];
} IfaceValue;
STRUCT_SIZE(IfaceValue, 16);

typedef struct {
    uint64_t timestampNs;
    uint32_t ifindex;
    uint32_t length;

    uint32_t uid;
    uint32_t tag;

    __be16 sport;
    __be16 dport;

    bool egress:1,
         wakeup:1;
    uint8_t ipProto;
    uint8_t tcpFlags;
    uint8_t ipVersion;  // 4=IPv4, 6=IPv6, 0=unknown
} PacketTrace;
STRUCT_SIZE(PacketTrace, 8+4+4 + 4+4 + 2+2 + 1+1+1+1);

// Since we cannot garbage collect the stats maps after device boot, we need to make these maps as
// large as possible. The maximum number of map entries we can have depends on the MEMLOCK rlimit
// granted to netd. The memory needed by each map can be calculated with the following formula:
//   elem_size = 40 + roundup(key_size, 8) + roundup(value_size, 8)
//   cost = roundup_pow_of_two(max_entries) * 16 + elem_size * max_entries +
//          elem_size * number_of_CPU
// The cost of each map currently used is (assuming the device has 8 CPUs):
//   cookie_tag_map:      key:  8 bytes, value:  8 bytes, cost:  822592 bytes =  823Kbytes
//   uid_counter_set_map: key:  4 bytes, value:  1 byte,  cost:  145216 bytes =  145Kbytes
//   app_uid_stats_map:   key:  4 bytes, value: 32 bytes, cost: 1062784 bytes = 1063Kbytes
//   uid_stats_map:       key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
//   tag_stats_map:       key: 16 bytes, value: 32 bytes, cost: 1142848 bytes = 1143Kbytes
//   iface_index_name_map:key:  4 bytes, value: 16 bytes, cost:   80896 bytes =   81Kbytes
//   iface_stats_map:     key:  4 bytes, value: 32 bytes, cost:   97024 bytes =   97Kbytes
//   dozable_uid_map:     key:  4 bytes, value:  1 byte,  cost:  145216 bytes =  145Kbytes
//   standby_uid_map:     key:  4 bytes, value:  1 byte,  cost:  145216 bytes =  145Kbytes
//   powersave_uid_map:   key:  4 bytes, value:  1 byte,  cost:  145216 bytes =  145Kbytes
//   packet_trace_ringbuf:key:  0 bytes, value: 32 bytes, cost:   32768 bytes =   32Kbytes
//   total:                                                                   4962Kbytes
// It takes at most 4.9MB of kernel memory if all maps are full, which requires any device
// running this module to have a MEMLOCK rlimit larger than 5MB. The old qtaguid module had no
// total limit on data entries, only a per-uid limit on the number of tags (default 1024 in the
// kernel).
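//
// As a worked example of the formula above (a sketch assuming 8 CPUs, matching the
// current COOKIE_UID_MAP_SIZE of 10000), the cookie_tag_map cost comes out as:
//   elem_size = 40 + roundup(8, 8) + roundup(8, 8) = 56
//   cost      = roundup_pow_of_two(10000) * 16 + 56 * 10000 + 56 * 8
//             = 16384 * 16 + 560000 + 448 = 822592 bytes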

// 'static' - otherwise these constants end up in .rodata in the resulting .o post compilation
static const int COOKIE_UID_MAP_SIZE = 10000;
static const int UID_COUNTERSET_MAP_SIZE = 4000;
static const int APP_STATS_MAP_SIZE = 10000;
static const int STATS_MAP_SIZE = 5000;
static const int IFACE_INDEX_NAME_MAP_SIZE = 1000;
static const int IFACE_STATS_MAP_SIZE = 1000;
static const int CONFIGURATION_MAP_SIZE = 2;
static const int UID_OWNER_MAP_SIZE = 4000;
static const int INGRESS_DISCARD_MAP_SIZE = 100;
static const int PACKET_TRACE_BUF_SIZE = 32 * 1024;
static const int DATA_SAVER_ENABLED_MAP_SIZE = 1;

#ifdef __cplusplus

#define BPF_NETD_PATH "/sys/fs/bpf/netd_shared/"

#define BPF_EGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_egress_stats"
#define BPF_INGRESS_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupskb_ingress_stats"

#define ASSERT_STRING_EQUAL(s1, s2) \
    static_assert(std::string_view(s1) == std::string_view(s2), "mismatch vs Android T netd")

| 141 | /* -=-=-=-=- WARNING -=-=-=-=- |
| 142 | * |
| 143 | * These 4 xt_bpf program paths are actually defined by: |
Maciej Żenczykowski | 200d372 | 2022-06-15 01:06:27 -0700 | [diff] [blame] | 144 | * //system/netd/include/mainline/XtBpfProgLocations.h |
Maciej Żenczykowski | 1205737 | 2022-06-14 14:36:34 -0700 | [diff] [blame] | 145 | * which is intentionally a non-automerged location. |
| 146 | * |
| 147 | * They are *UNCHANGEABLE* due to being hard coded in Android T's netd binary |
| 148 | * as such we have compile time asserts that things match. |
| 149 | * (which will be validated during build on mainline-prod branch against old system/netd) |
| 150 | * |
| 151 | * If you break this, netd on T will fail to start with your tethering mainline module. |
| 152 | */ |
ASSERT_STRING_EQUAL(XT_BPF_INGRESS_PROG_PATH,   BPF_NETD_PATH "prog_netd_skfilter_ingress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_EGRESS_PROG_PATH,    BPF_NETD_PATH "prog_netd_skfilter_egress_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_ALLOWLIST_PROG_PATH, BPF_NETD_PATH "prog_netd_skfilter_allowlist_xtbpf");
ASSERT_STRING_EQUAL(XT_BPF_DENYLIST_PROG_PATH,  BPF_NETD_PATH "prog_netd_skfilter_denylist_xtbpf");

#define CGROUP_SOCKET_PROG_PATH BPF_NETD_PATH "prog_netd_cgroupsock_inet_create"

#define TC_BPF_INGRESS_ACCOUNT_PROG_NAME "prog_netd_schedact_ingress_account"
#define TC_BPF_INGRESS_ACCOUNT_PROG_PATH BPF_NETD_PATH TC_BPF_INGRESS_ACCOUNT_PROG_NAME

#define COOKIE_TAG_MAP_PATH BPF_NETD_PATH "map_netd_cookie_tag_map"
#define UID_COUNTERSET_MAP_PATH BPF_NETD_PATH "map_netd_uid_counterset_map"
#define APP_UID_STATS_MAP_PATH BPF_NETD_PATH "map_netd_app_uid_stats_map"
#define STATS_MAP_A_PATH BPF_NETD_PATH "map_netd_stats_map_A"
#define STATS_MAP_B_PATH BPF_NETD_PATH "map_netd_stats_map_B"
#define IFACE_INDEX_NAME_MAP_PATH BPF_NETD_PATH "map_netd_iface_index_name_map"
#define IFACE_STATS_MAP_PATH BPF_NETD_PATH "map_netd_iface_stats_map"
#define CONFIGURATION_MAP_PATH BPF_NETD_PATH "map_netd_configuration_map"
#define UID_OWNER_MAP_PATH BPF_NETD_PATH "map_netd_uid_owner_map"
#define UID_PERMISSION_MAP_PATH BPF_NETD_PATH "map_netd_uid_permission_map"
#define INGRESS_DISCARD_MAP_PATH BPF_NETD_PATH "map_netd_ingress_discard_map"
#define PACKET_TRACE_RINGBUF_PATH BPF_NETD_PATH "map_netd_packet_trace_ringbuf"
#define PACKET_TRACE_ENABLED_MAP_PATH BPF_NETD_PATH "map_netd_packet_trace_enabled_map"
#define DATA_SAVER_ENABLED_MAP_PATH BPF_NETD_PATH "map_netd_data_saver_enabled_map"

#endif  // __cplusplus

// LINT.IfChange(match_type)
enum UidOwnerMatchType {
    NO_MATCH = 0,
    HAPPY_BOX_MATCH = (1 << 0),
    PENALTY_BOX_MATCH = (1 << 1),
    DOZABLE_MATCH = (1 << 2),
    STANDBY_MATCH = (1 << 3),
    POWERSAVE_MATCH = (1 << 4),
    RESTRICTED_MATCH = (1 << 5),
    LOW_POWER_STANDBY_MATCH = (1 << 6),
    IIF_MATCH = (1 << 7),
    LOCKDOWN_VPN_MATCH = (1 << 8),
    OEM_DENY_1_MATCH = (1 << 9),
    OEM_DENY_2_MATCH = (1 << 10),
    OEM_DENY_3_MATCH = (1 << 11),
};
// LINT.ThenChange(../framework/src/android/net/BpfNetMapsConstants.java)

enum BpfPermissionMatch {
    BPF_PERMISSION_INTERNET = 1 << 2,
    BPF_PERMISSION_UPDATE_DEVICE_STATS = 1 << 3,
};
// In production we use two identical stats maps to record per-uid stats, and swap and clean
// them based on the configuration specified here. The statsMapType value in the configuration
// map specifies which map is currently in use.
enum StatsMapType {
    SELECT_MAP_A,
    SELECT_MAP_B,
};
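
// Illustrative sketch only (not netd's actual code): how a C++ userspace reader might
// select the live stats map, assuming a hypothetical BpfMap wrapper with a readValue()
// helper; the real netd/BpfNetMaps code differs.
//   uint32_t statsMapType = configurationMap.readValue(CURRENT_STATS_MAP_CONFIGURATION_KEY);
//   auto& liveMap = (statsMapType == SELECT_MAP_A) ? statsMapA : statsMapB;
//   // BPF programs write into liveMap; the other map can be drained and cleared safely.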

// TODO: change the configuration object from a bitmask to an object with clearer
// semantics, like a struct.
typedef uint32_t BpfConfig;
static const BpfConfig DEFAULT_CONFIG = 0;

typedef struct {
    // Allowed interface index. Only applicable if IIF_MATCH is set in the rule bitmask above.
    uint32_t iif;
    // A bitmask of enum values in UidOwnerMatchType.
    uint32_t rule;
} UidOwnerValue;
STRUCT_SIZE(UidOwnerValue, 2 * 4);  // 8
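// Example entry (illustrative, with wlan0Ifindex as a placeholder for a real ifindex):
// a uid allowed only on one interface while the dozable chain also applies to it:
//   UidOwnerValue v = {.iif = wlan0Ifindex, .rule = IIF_MATCH | DOZABLE_MATCH};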

typedef struct {
    // The destination ip of the incoming packet. IPv4 uses the IPv4-mapped IPv6 address format.
    struct in6_addr daddr;
} IngressDiscardKey;
STRUCT_SIZE(IngressDiscardKey, 16);  // 16

typedef struct {
    // Allowed interface indexes. Use the same value twice if you want to match just one ifindex.
    uint32_t iif[2];
} IngressDiscardValue;
STRUCT_SIZE(IngressDiscardValue, 2 * 4);  // 8
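// Example (illustrative): to accept a destination's ingress traffic only via wlan0,
// where wlan0Ifindex is a placeholder for its interface index:
//   IngressDiscardValue v = {.iif = {wlan0Ifindex, wlan0Ifindex}};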

// Entry in the configuration map that stores which UID rules are enabled.
#define UID_RULES_CONFIGURATION_KEY 0
// Entry in the configuration map that stores which stats map is currently in use.
#define CURRENT_STATS_MAP_CONFIGURATION_KEY 1

#undef STRUCT_SIZE

// DROP_IF_SET is the set of rules that DROP if the rule is globally enabled, and the per-uid
// bit is set.
#define DROP_IF_SET (STANDBY_MATCH | OEM_DENY_1_MATCH | OEM_DENY_2_MATCH | OEM_DENY_3_MATCH)
// DROP_IF_UNSET is the set of rules that DROP if the rule is globally enabled, and the per-uid
// bit is NOT set.
#define DROP_IF_UNSET (DOZABLE_MATCH | POWERSAVE_MATCH | RESTRICTED_MATCH | LOW_POWER_STANDBY_MATCH)

// Warning: funky bit-wise arithmetic: in parallel, for all DROP_IF_SET/UNSET rules
// check whether the rules are globally enabled, and if so whether the rules are
// set/unset for the specific uid. DROP if that is the case for ANY of the rules.
// We achieve this by masking out only the bits/rules we're interested in checking,
// and negating (via bit-wise xor) the bits/rules that should drop if unset.
static inline bool isBlockedByUidRules(BpfConfig enabledRules, uint32_t uidRules) {
    return enabledRules & (DROP_IF_SET | DROP_IF_UNSET) & (uidRules ^ DROP_IF_UNSET);
}
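
// Worked example of the bit trick above, using the UidOwnerMatchType values:
//   enabledRules = DOZABLE_MATCH, uidRules = 0 (uid not on the dozable allowlist):
//     uidRules ^ DROP_IF_UNSET has DOZABLE_MATCH set (the unset bit flips on),
//     so the AND chain yields DOZABLE_MATCH, which is non-zero => blocked.
//   enabledRules = DOZABLE_MATCH, uidRules = DOZABLE_MATCH (uid on the allowlist):
//     the xor clears DOZABLE_MATCH, the whole expression is 0 => not blocked.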

static inline bool is_system_uid(uint32_t uid) {
    // MIN_SYSTEM_UID is AID_ROOT == 0, so uint32_t is *always* >= 0
    // MAX_SYSTEM_UID is AID_NOBODY == 9999, while AID_APP_START == 10000
    return (uid < AID_APP_START);
}