/**
 * Copyright (c) 2022, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "NetdUpdatable"

#include "BpfHandler.h"

#include <linux/bpf.h>
#include <inttypes.h>

#include <android-base/unique_fd.h>
#include <android-modules-utils/sdk_level.h>
#include <bpf/WaitForProgsLoaded.h>
#include <log/log.h>
#include <netdutils/UidConstants.h>
#include <private/android_filesystem_config.h>

#include "BpfSyscallWrappers.h"

namespace android {
namespace net {

using base::unique_fd;
using base::WaitForProperty;
using bpf::getSocketCookie;
using bpf::retrieveProgram;
using netdutils::Status;
using netdutils::statusFromErrno;

constexpr int PER_UID_STATS_ENTRIES_LIMIT = 500;
// At most 90% of the stats map may be used by tagged traffic entries. This ensures
// that 10% of the map is always available to count untagged traffic, one entry per UID.
// Otherwise, apps would be able to avoid data usage accounting entirely by filling up the
// map with tagged traffic entries.
constexpr int TOTAL_UID_STATS_ENTRIES_LIMIT = STATS_MAP_SIZE * 0.9;
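// For illustration only (the real STATS_MAP_SIZE is defined elsewhere): with a map size
// of 4096 this limit would be 3686, leaving 410 entries reserved for untagged per-UID
// accounting, which also satisfies the static_assert below.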

static_assert(STATS_MAP_SIZE - TOTAL_UID_STATS_ENTRIES_LIMIT > 100,
              "The limit for the stats map is too high; stats data may be lost due to overflow");

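// Retrieves the BPF program pinned at programPath and attaches it to the cgroup
// referred to by cgroupFd with the given attach type.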
static Status attachProgramToCgroup(const char* programPath, const unique_fd& cgroupFd,
                                    bpf_attach_type type) {
    unique_fd cgroupProg(retrieveProgram(programPath));
    if (!cgroupProg.ok()) {
        return statusFromErrno(errno, fmt::format("Failed to get program from {}", programPath));
    }
    if (android::bpf::attachProgram(type, cgroupProg, cgroupFd)) {
        return statusFromErrno(errno, fmt::format("Program {} attach failed", programPath));
    }
    return netdutils::status::ok;
}

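// Verifies that the BPF program pinned at programPath exists and is accessible to netd.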
static Status checkProgramAccessible(const char* programPath) {
    unique_fd prog(retrieveProgram(programPath));
    if (!prog.ok()) {
        return statusFromErrno(errno, fmt::format("Failed to get program from {}", programPath));
    }
    return netdutils::status::ok;
}

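// Validates platform/kernel prerequisites, then attaches the netd cgroup BPF programs
// (ingress/egress filters, socket create/release, connect/sendmsg/recvmsg/sockopt hooks,
// and the bind port-blocking programs) to the cgroup v2 root at cg2_path.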
static Status initPrograms(const char* cg2_path) {
    if (!cg2_path) return Status("cg2_path is NULL");

    // This code was mainlined in T, so this should be trivially satisfied.
    if (!modules::sdklevel::IsAtLeastT()) return Status("S- platform is unsupported");

    // S requires eBPF support which was only added in 4.9, so this should be satisfied.
    if (!bpf::isAtLeastKernelVersion(4, 9, 0)) {
        return Status("kernel version < 4.9.0 is unsupported");
    }

    // U bumps the kernel requirement up to 4.14
    if (modules::sdklevel::IsAtLeastU() && !bpf::isAtLeastKernelVersion(4, 14, 0)) {
        return Status("U+ platform with kernel version < 4.14.0 is unsupported");
    }

    // U mandates this mount point (though it should also be the case on T)
    if (modules::sdklevel::IsAtLeastU() && !!strcmp(cg2_path, "/sys/fs/cgroup")) {
        return Status("U+ platform with cg2_path != /sys/fs/cgroup is unsupported");
    }

    unique_fd cg_fd(open(cg2_path, O_DIRECTORY | O_RDONLY | O_CLOEXEC));
    if (!cg_fd.ok()) {
        const int err = errno;
        ALOGE("Failed to open the cgroup directory: %s", strerror(err));
        return statusFromErrno(err, "Open the cgroup directory failed");
    }
    RETURN_IF_NOT_OK(checkProgramAccessible(XT_BPF_ALLOWLIST_PROG_PATH));
    RETURN_IF_NOT_OK(checkProgramAccessible(XT_BPF_DENYLIST_PROG_PATH));
    RETURN_IF_NOT_OK(checkProgramAccessible(XT_BPF_EGRESS_PROG_PATH));
    RETURN_IF_NOT_OK(checkProgramAccessible(XT_BPF_INGRESS_PROG_PATH));
    RETURN_IF_NOT_OK(attachProgramToCgroup(BPF_EGRESS_PROG_PATH, cg_fd, BPF_CGROUP_INET_EGRESS));
    RETURN_IF_NOT_OK(attachProgramToCgroup(BPF_INGRESS_PROG_PATH, cg_fd, BPF_CGROUP_INET_INGRESS));

    // On devices that support the cgroup socket filter, the filter should have been
    // loaded successfully by the bpfloader, so we attach it to the cgroup if the
    // program is pinned properly.
    // TODO: delete the if statement once all devices support the cgroup socket filter
    // (i.e. the minimum kernel version required is 4.14).
    if (bpf::isAtLeastKernelVersion(4, 14, 0)) {
        RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_INET_CREATE_PROG_PATH,
                                               cg_fd, BPF_CGROUP_INET_SOCK_CREATE));
    }

    if (modules::sdklevel::IsAtLeastV()) {
        if (bpf::isAtLeastKernelVersion(5, 15, 0)) {
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_CONNECT4_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_INET4_CONNECT));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_CONNECT6_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_INET6_CONNECT));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_UDP4_RECVMSG_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_UDP4_RECVMSG));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_UDP6_RECVMSG_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_UDP6_RECVMSG));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_UDP4_SENDMSG_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_UDP4_SENDMSG));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_UDP6_SENDMSG_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_UDP6_SENDMSG));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_GETSOCKOPT_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_GETSOCKOPT));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_SETSOCKOPT_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_SETSOCKOPT));
            RETURN_IF_NOT_OK(attachProgramToCgroup(CGROUP_INET_RELEASE_PROG_PATH,
                                                   cg_fd, BPF_CGROUP_INET_SOCK_RELEASE));
        }
    }

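    // 4.19+ kernels additionally get the bind4/bind6 programs below, which are used to
    // block bind() on specific local ports.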
    if (bpf::isAtLeastKernelVersion(4, 19, 0)) {
        RETURN_IF_NOT_OK(attachProgramToCgroup(
                "/sys/fs/bpf/netd_readonly/prog_block_bind4_block_port",
                cg_fd, BPF_CGROUP_INET4_BIND));
        RETURN_IF_NOT_OK(attachProgramToCgroup(
                "/sys/fs/bpf/netd_readonly/prog_block_bind6_block_port",
                cg_fd, BPF_CGROUP_INET6_BIND));

        // This should trivially pass, since we just attached up above,
        // but BPF_PROG_QUERY is only implemented on 4.19+ kernels.
        if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET_EGRESS) <= 0) abort();
        if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET_INGRESS) <= 0) abort();
        if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET_SOCK_CREATE) <= 0) abort();
        if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET4_BIND) <= 0) abort();
        if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET6_BIND) <= 0) abort();
    }

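    // The same BPF_PROG_QUERY sanity check for the V+ / 5.15+ hooks attached above.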
    if (modules::sdklevel::IsAtLeastV()) {
        if (bpf::isAtLeastKernelVersion(5, 15, 0)) {
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET4_CONNECT) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET6_CONNECT) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_UDP4_RECVMSG) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_UDP6_RECVMSG) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_UDP4_SENDMSG) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_UDP6_SENDMSG) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_GETSOCKOPT) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_SETSOCKOPT) <= 0) abort();
            if (bpf::queryProgram(cg_fd, BPF_CGROUP_INET_SOCK_RELEASE) <= 0) abort();
        }
    }

    return netdutils::status::ok;
}

BpfHandler::BpfHandler()
    : mPerUidStatsEntriesLimit(PER_UID_STATS_ENTRIES_LIMIT),
      mTotalUidStatsEntriesLimit(TOTAL_UID_STATS_ENTRIES_LIMIT) {}

BpfHandler::BpfHandler(uint32_t perUidLimit, uint32_t totalLimit)
    : mPerUidStatsEntriesLimit(perUidLimit), mTotalUidStatsEntriesLimit(totalLimit) {}

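// Returns true once the mainline networking BPF load has completed, i.e. once the
// netbpfload service (started below as mdnsd_netbpfload) has created the mainline_done
// marker under /sys/fs/bpf/netd_shared.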
static bool mainlineNetBpfLoadDone() {
    return !access("/sys/fs/bpf/netd_shared/mainline_done", F_OK);
}

// copied with minor changes from waitForProgsLoaded()
// p/m/C's staticlibs/native/bpf_headers/include/bpf/WaitForProgsLoaded.h
static inline void waitForNetProgsLoaded() {
    // infinite loop until success with 5/10/20/40/60/60/60... delay
    for (int delay = 5;; delay *= 2) {
        if (delay > 60) delay = 60;
        if (WaitForProperty("init.svc.mdnsd_netbpfload", "stopped", std::chrono::seconds(delay))
            && mainlineNetBpfLoadDone())
            return;
        ALOGW("Waited %ds for init.svc.mdnsd_netbpfload=stopped, still waiting...", delay);
    }
}

Status BpfHandler::init(const char* cg2_path) {
    // Note: netd *can* be restarted, so this might get called a second time after boot is
    // complete, at which point we don't need to (and shouldn't) wait for (or, more
    // importantly, start) bpf loading.

    if (base::GetProperty("bpf.progs_loaded", "") != "1") {
        // AOSP platform netd & mainline don't need this (at least prior to U QPR3),
        // but there could be platform-provided (xt_)bpf programs that an oem/vendor
        // modified netd (which calls us during init) depends on...
        ALOGI("Waiting for platform BPF programs");
        android::bpf::waitForProgsLoaded();
    }

    if (!mainlineNetBpfLoadDone()) {
        const bool enforce_mainline = false;  // TODO: flip to true

        // We're on < U QPR3 & it's the first time netd is starting up (unless crashlooping).
        //
        // On U QPR3+ netbpfload is guaranteed to run before the platform bpfloader,
        // so waitForProgsLoaded() implies mainlineNetBpfLoadDone().
        if (!base::SetProperty("ctl.start", "mdnsd_netbpfload")) {
            ALOGE("Failed to set property ctl.start=mdnsd_netbpfload, see dmesg for reason.");
            if (enforce_mainline) abort();
        }

        if (enforce_mainline) {
            ALOGI("Waiting for Networking BPF programs");
            waitForNetProgsLoaded();
            ALOGI("Networking BPF programs are loaded");
        } else {
            ALOGI("Started mdnsd_netbpfload asynchronously.");
        }
    }

    ALOGI("BPF programs are loaded");

    RETURN_IF_NOT_OK(initPrograms(cg2_path));
    RETURN_IF_NOT_OK(initMaps());

    return netdutils::status::ok;
}

static void mapLockTest(void) {
    // The maps must be R/W, and as yet unopened (or more specifically not yet locked).
    const char * const m1 = BPF_NETD_PATH "map_netd_lock_array_test_map";
    const char * const m2 = BPF_NETD_PATH "map_netd_lock_hash_test_map";

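    // Expected semantics, as exercised below: ExclusiveRW (and WO) opens take an exclusive
    // lock and conflict with any other locked open, RW opens conflict with ExclusiveRW
    // opens, and RO opens never take a lock at all.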
    unique_fd fd0(bpf::mapRetrieveExclusiveRW(m1)); if (!fd0.ok()) abort();  // grabs exclusive lock

    unique_fd fd1(bpf::mapRetrieveExclusiveRW(m2)); if (!fd1.ok()) abort();  // no conflict with fd0
    unique_fd fd2(bpf::mapRetrieveExclusiveRW(m2)); if ( fd2.ok()) abort();  // busy due to fd1
    unique_fd fd3(bpf::mapRetrieveRO(m2));          if (!fd3.ok()) abort();  // no lock taken
    unique_fd fd4(bpf::mapRetrieveRW(m2));          if ( fd4.ok()) abort();  // busy due to fd1
    fd1.reset();                                                             // releases exclusive lock
    unique_fd fd5(bpf::mapRetrieveRO(m2));          if (!fd5.ok()) abort();  // no lock taken
    unique_fd fd6(bpf::mapRetrieveRW(m2));          if (!fd6.ok()) abort();  // now ok
    unique_fd fd7(bpf::mapRetrieveRO(m2));          if (!fd7.ok()) abort();  // no lock taken
    unique_fd fd8(bpf::mapRetrieveExclusiveRW(m2)); if ( fd8.ok()) abort();  // busy due to fd6

    fd0.reset();                                                             // releases exclusive lock
    unique_fd fd9(bpf::mapRetrieveWO(m1));          if (!fd9.ok()) abort();  // grabs exclusive lock
}

Status BpfHandler::initMaps() {
    mapLockTest();

    RETURN_IF_NOT_OK(mStatsMapA.init(STATS_MAP_A_PATH));
    RETURN_IF_NOT_OK(mStatsMapB.init(STATS_MAP_B_PATH));
    RETURN_IF_NOT_OK(mConfigurationMap.init(CONFIGURATION_MAP_PATH));
    RETURN_IF_NOT_OK(mUidPermissionMap.init(UID_PERMISSION_MAP_PATH));
    // initialized last so mCookieTagMap.isValid() implies everything else is valid too
    RETURN_IF_NOT_OK(mCookieTagMap.init(COOKIE_TAG_MAP_PATH));
    ALOGI("%s successfully", __func__);

    return netdutils::status::ok;
}

bool BpfHandler::hasUpdateDeviceStatsPermission(uid_t uid) {
    // This implementation follows the same logic as ActivityManager#checkComponentPermission.
    // It implies that the real uid can never be the same as PER_USER_RANGE.
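    // e.g. assuming PER_USER_RANGE is 100000, uid 10023 (user 0) and uid 1010023 (user 10)
    // both map to appId 10023 and are treated identically here.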
    uint32_t appId = uid % PER_USER_RANGE;
    auto permission = mUidPermissionMap.readValue(appId);
    if (permission.ok() && (permission.value() & BPF_PERMISSION_UPDATE_DEVICE_STATS)) {
        return true;
    }
    return ((appId == AID_ROOT) || (appId == AID_SYSTEM) || (appId == AID_DNS));
}

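// Associates the socket's cookie with (chargeUid, tag) in the cookie tag map, so that
// subsequent traffic on the socket is accounted to that uid/tag. Returns -EPERM if the
// caller may not charge the requested uid, and -EMFILE if the stats map is too close to
// overflowing to accept another tagged entry.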
int BpfHandler::tagSocket(int sockFd, uint32_t tag, uid_t chargeUid, uid_t realUid) {
    if (!mCookieTagMap.isValid()) return -EPERM;

    if (chargeUid != realUid && !hasUpdateDeviceStatsPermission(realUid)) return -EPERM;

    // Note that tagging a socket to AID_CLAT is only implemented in the JNI ClatCoordinator.
    // A process is not allowed to tag a socket to AID_CLAT via tagSocket(), since that would
    // let it bypass per-process data usage accounting. Tagging to AID_CLAT is used to avoid
    // counting CLAT traffic twice. See packages/modules/Connectivity/service/jni/
    // com_android_server_connectivity_ClatCoordinator.cpp
    if (chargeUid == AID_CLAT) return -EPERM;

    // The socket destroy listener only monitors the group {INET_TCP, INET_UDP, INET6_TCP,
    // INET6_UDP}. Tagging a socket the listener does not support means the tag can't be
    // removed from the tag map automatically, so the tag map may eventually run out of
    // space because of dead tag entries. Note that although the net client's tagSocket()
    // has already rejected families other than AF_INET and AF_INET6, the family validation
    // is still done here just in case.
    // See tagSocket in system/netd/client/NetdClient.cpp and
    // TrafficController::makeSkDestroyListener in
    // packages/modules/Connectivity/service/native/TrafficController.cpp
    // TODO: remove this once the socket destroy listener can detect more types of socket destroy.
    int socketFamily;
    socklen_t familyLen = sizeof(socketFamily);
    if (getsockopt(sockFd, SOL_SOCKET, SO_DOMAIN, &socketFamily, &familyLen)) {
        ALOGE("Failed to getsockopt SO_DOMAIN: %s, fd: %d", strerror(errno), sockFd);
        return -errno;
    }
    if (socketFamily != AF_INET && socketFamily != AF_INET6) {
        ALOGE("Unsupported family: %d", socketFamily);
        return -EAFNOSUPPORT;
    }

    int socketProto;
    socklen_t protoLen = sizeof(socketProto);
    if (getsockopt(sockFd, SOL_SOCKET, SO_PROTOCOL, &socketProto, &protoLen)) {
        ALOGE("Failed to getsockopt SO_PROTOCOL: %s, fd: %d", strerror(errno), sockFd);
        return -errno;
    }
    if (socketProto != IPPROTO_UDP && socketProto != IPPROTO_TCP) {
        ALOGE("Unsupported protocol: %d", socketProto);
        return -EPROTONOSUPPORT;
    }

    uint64_t sock_cookie = getSocketCookie(sockFd);
    if (!sock_cookie) return -errno;

    UidTagValue newKey = {.uid = (uint32_t)chargeUid, .tag = tag};

    uint32_t totalEntryCount = 0;
    uint32_t perUidEntryCount = 0;
    // Now we go through the stats map and count how many entries are associated with
    // chargeUid. If that uid has already hit its per-uid entry limit, we block the request
    // to prevent the map from overflowing. Note though that it isn't really safe to iterate
    // over the map here, since it might be modified by the system server, which might
    // toggle the live stats map and clean it.
    const auto countUidStatsEntries = [chargeUid, &totalEntryCount, &perUidEntryCount](
                                              const StatsKey& key,
                                              const BpfMapRO<StatsKey, StatsValue>&) {
        if (key.uid == chargeUid) {
            perUidEntryCount++;
        }
        totalEntryCount++;
        return base::Result<void>();
    };
    auto configuration = mConfigurationMap.readValue(CURRENT_STATS_MAP_CONFIGURATION_KEY);
    if (!configuration.ok()) {
        ALOGE("Failed to get current configuration: %s",
              strerror(configuration.error().code()));
        return -configuration.error().code();
    }
    if (configuration.value() != SELECT_MAP_A && configuration.value() != SELECT_MAP_B) {
        ALOGE("unknown configuration value: %d", configuration.value());
        return -EINVAL;
    }

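    // The configuration map records which of the two stats maps (A or B) the eBPF programs
    // are currently writing to; the system server toggles this when it swaps and cleans the
    // live map, so the entry counting below runs against the currently live map.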
    BpfMapRO<StatsKey, StatsValue>& currentMap =
            (configuration.value() == SELECT_MAP_A) ? mStatsMapA : mStatsMapB;
    base::Result<void> res = currentMap.iterate(countUidStatsEntries);
    if (!res.ok()) {
        ALOGE("Failed to count the stats entry in map: %s",
              strerror(res.error().code()));
        return -res.error().code();
    }

    if (totalEntryCount > mTotalUidStatsEntriesLimit ||
        perUidEntryCount > mPerUidStatsEntriesLimit) {
        ALOGE("Too many stats entries in the map, total count: %u, chargeUid(%u) count: %u,"
              " blocking tag request to prevent map overflow",
              totalEntryCount, chargeUid, perUidEntryCount);
        return -EMFILE;
    }
    // Update the socket's tag information in the cookieUidMap. Use the BPF_ANY flag so a
    // new entry is inserted if the key doesn't exist yet, and the tag is updated if one is
    // already stored. Since the eBPF program in the kernel only reads this map, protected
    // by an RCU read lock, it should be fine to update the map concurrently while the eBPF
    // program is running.
    res = mCookieTagMap.writeValue(sock_cookie, newKey, BPF_ANY);
    if (!res.ok()) {
        ALOGE("Failed to tag the socket: %s", strerror(res.error().code()));
        return -res.error().code();
    }
    ALOGD("Socket with cookie %" PRIu64 " tagged successfully with tag %" PRIu32 " uid %u "
          "and real uid %u", sock_cookie, tag, chargeUid, realUid);
    return 0;
}

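// Removes the socket cookie's entry from the cookie tag map, so further traffic on the
// socket is no longer accounted against the previously set uid/tag.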
int BpfHandler::untagSocket(int sockFd) {
    uint64_t sock_cookie = getSocketCookie(sockFd);
    if (!sock_cookie) return -errno;

    if (!mCookieTagMap.isValid()) return -EPERM;
    base::Result<void> res = mCookieTagMap.deleteValue(sock_cookie);
    if (!res.ok()) {
        ALOGE("Failed to untag socket: %s", strerror(res.error().code()));
        return -res.error().code();
    }
    ALOGD("Socket with cookie %" PRIu64 " untagged successfully.", sock_cookie);
    return 0;
}

}  // namespace net
}  // namespace android