/*
 * Copyright (C) 2018-2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "NetBpfLoad"

#include <algorithm>
#include <arpa/inet.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <dirent.h>
#include <elf.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <fstream>
#include <inttypes.h>
#include <iostream>
#include <linux/unistd.h>
#include <log/log.h>
#include <net/if.h>
#include <optional>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <sysexits.h>
#include <unistd.h>
#include <unordered_map>
#include <vector>

#include <android-base/cmsg.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/properties.h>
#include <android-base/scopeguard.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include <android/api-level.h>

#define BPF_SUPPORT_CMD_FIXUP
#include "BpfSyscallWrappers.h"
#include "bpf/BpfUtils.h"
#include "bpf_map_def.h"

// The following matches bpf_helpers.h, which is only for inclusion in bpf code
#define BPFLOADER_MAINLINE_S_VERSION 42u
#define BPFLOADER_MAINLINE_25Q2_VERSION 47u
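// These version constants are compared against the bpfloader_min_ver /
// bpfloader_max_ver fields carried by each map and program definition
// (see createMaps() and loadCodeSections() below).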

using android::base::borrowed_fd;
using android::base::EndsWith;
using android::base::GetIntProperty;
using android::base::GetProperty;
using android::base::InitLogging;
using android::base::KernelLogger;
using android::base::SetProperty;
using android::base::Split;
using android::base::StartsWith;
using android::base::Tokenize;
using android::base::unique_fd;
using std::ifstream;
using std::ios;
using std::optional;
using std::string;
using std::vector;

namespace android {
namespace bpf {

// Returns the build type string (from ro.build.type).
const std::string& getBuildType() {
    static std::string t = GetProperty("ro.build.type", "unknown");
    return t;
}

// The following functions classify the 3 Android build types.
inline bool isEng() {
    return getBuildType() == "eng";
}

inline bool isUser() {
    return getBuildType() == "user";
}

inline bool isUserdebug() {
    return getBuildType() == "userdebug";
}

#define BPF_FS_PATH "/sys/fs/bpf/"

static unsigned int page_size = static_cast<unsigned int>(getpagesize());

typedef struct {
    const char* name;
    enum bpf_prog_type type;
    enum bpf_attach_type attach_type;
} sectionType;

/*
 * Map section name prefixes to program types, the section name will be:
 *   SECTION(<prefix>/<name-of-program>)
 * For example:
 *   SECTION("tracepoint/sched_switch_func") where sched_switch_func
 * is the name of the program, and tracepoint is the type.
 *
 * However, be aware that you should not be directly using the SECTION() macro.
 * Instead use the DEFINE_(BPF|XDP)_(PROG|MAP)... & LICENSE macros.
 *
 * Programs shipped inside the tethering apex should be limited to networking stuff,
 * as KPROBE, PERF_EVENT, TRACEPOINT are dangerous to use from mainline updatable code,
 * since they are less stable abi/api and may conflict with platform uses of bpf.
 */
sectionType sectionNameTypes[] = {
        {"bind4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
        {"bind6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
        {"cgroupskb/", BPF_PROG_TYPE_CGROUP_SKB},
        {"cgroupsock/", BPF_PROG_TYPE_CGROUP_SOCK},
        {"cgroupsockcreate/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE},
        {"cgroupsockrelease/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE},
        {"connect4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
        {"connect6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
        {"egress/", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS},
        {"getsockopt/", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT},
        {"ingress/", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS},
        {"postbind4/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
        {"postbind6/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
        {"recvmsg4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
        {"recvmsg6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
        {"schedact/", BPF_PROG_TYPE_SCHED_ACT},
        {"schedcls/", BPF_PROG_TYPE_SCHED_CLS},
        {"sendmsg4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
        {"sendmsg6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
        {"setsockopt/", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
        {"skfilter/", BPF_PROG_TYPE_SOCKET_FILTER},
        {"sockops/", BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS},
        {"sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
        {"xdp/", BPF_PROG_TYPE_XDP},
};
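// Illustrative use of the table above (hypothetical program names): a program
// defined with SECTION("schedcls/ingress_filter") is loaded as
// BPF_PROG_TYPE_SCHED_CLS, while SECTION("connect4/inet_connect") is loaded as
// BPF_PROG_TYPE_CGROUP_SOCK_ADDR with expected_attach_type BPF_CGROUP_INET4_CONNECT.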

typedef struct {
    enum bpf_prog_type type;
    enum bpf_attach_type attach_type;
    string name;  // The canonicalized section name.
    string program_name;
    vector<char> data;
    vector<char> rel_data;
    optional<struct bpf_prog_def> prog_def;

    unique_fd prog_fd;  // fd after loading
} codeSection;

static int readElfHeader(ifstream& elfFile, Elf64_Ehdr* eh) {
    elfFile.seekg(0);
    if (elfFile.fail()) return -1;

    if (!elfFile.read((char*)eh, sizeof(*eh))) return -1;

    return 0;
}

// Reads all section header tables into an Shdr array
static int readSectionHeadersAll(ifstream& elfFile, vector<Elf64_Shdr>& shTable) {
    Elf64_Ehdr eh;
    int ret = 0;

    ret = readElfHeader(elfFile, &eh);
    if (ret) return ret;

    elfFile.seekg(eh.e_shoff);
    if (elfFile.fail()) return -1;

    // Read shdr table entries
    shTable.resize(eh.e_shnum);

    if (!elfFile.read((char*)shTable.data(), (eh.e_shnum * eh.e_shentsize))) return -ENOMEM;

    return 0;
}

// Read a section by its index - for ex to get sec hdr strtab blob
static int readSectionByIdx(ifstream& elfFile, int id, vector<char>& sec) {
    vector<Elf64_Shdr> shTable;
    int ret = readSectionHeadersAll(elfFile, shTable);
    if (ret) return ret;

    elfFile.seekg(shTable[id].sh_offset);
    if (elfFile.fail()) return -1;

    sec.resize(shTable[id].sh_size);
    if (!elfFile.read(sec.data(), shTable[id].sh_size)) return -1;

    return 0;
}

// Read whole section header string table
static int readSectionHeaderStrtab(ifstream& elfFile, vector<char>& strtab) {
    Elf64_Ehdr eh;
    int ret = readElfHeader(elfFile, &eh);
    if (ret) return ret;

    ret = readSectionByIdx(elfFile, eh.e_shstrndx, strtab);
    if (ret) return ret;

    return 0;
}

// Get name from offset in strtab
static int getSymName(ifstream& elfFile, int nameOff, string& name) {
    int ret;
    vector<char> secStrTab;

    ret = readSectionHeaderStrtab(elfFile, secStrTab);
    if (ret) return ret;

    if (nameOff >= (int)secStrTab.size()) return -1;

    name = string((char*)secStrTab.data() + nameOff);
    return 0;
}

// Reads a full section by name - example to get the GPL license
template <typename T>
static int readSectionByName(const char* name, ifstream& elfFile, vector<T>& data) {
    vector<char> secStrTab;
    vector<Elf64_Shdr> shTable;
    int ret;

    ret = readSectionHeadersAll(elfFile, shTable);
    if (ret) return ret;

    ret = readSectionHeaderStrtab(elfFile, secStrTab);
    if (ret) return ret;

    for (int i = 0; i < (int)shTable.size(); i++) {
        char* secname = secStrTab.data() + shTable[i].sh_name;
        if (!secname) continue;

        if (!strcmp(secname, name)) {
            elfFile.seekg(shTable[i].sh_offset);
            if (elfFile.fail()) return -1;

            if (shTable[i].sh_size % sizeof(T)) return -1;
            data.resize(shTable[i].sh_size / sizeof(T));
            if (!elfFile.read(reinterpret_cast<char*>(data.data()), shTable[i].sh_size))
                return -1;

            return 0;
        }
    }
    return -2;
}

unsigned int readSectionUint(const char* name, ifstream& elfFile) {
    vector<char> theBytes;
    int ret = readSectionByName(name, elfFile, theBytes);
    if (ret) {
        ALOGE("Couldn't find section %s.", name);
        abort();
    } else if (theBytes.size() < sizeof(unsigned int)) {
        ALOGE("Section %s is too short.", name);
        abort();
    } else {
        // decode first 4 bytes as LE32 uint, there will likely be more bytes due to alignment.
        unsigned int value = static_cast<unsigned char>(theBytes[3]);
        value <<= 8;
        value += static_cast<unsigned char>(theBytes[2]);
        value <<= 8;
        value += static_cast<unsigned char>(theBytes[1]);
        value <<= 8;
        value += static_cast<unsigned char>(theBytes[0]);
        ALOGD("Section %s value is %u [0x%x]", name, value, value);
        return value;
    }
}
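// Illustrative example for readSectionUint() (made-up bytes, not a real
// section): if the section starts with the bytes 2f 00 00 00, the
// little-endian decode above yields 0x2f == 47.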

static int readSectionByType(ifstream& elfFile, int type, vector<char>& data) {
    int ret;
    vector<Elf64_Shdr> shTable;

    ret = readSectionHeadersAll(elfFile, shTable);
    if (ret) return ret;

    for (int i = 0; i < (int)shTable.size(); i++) {
        if ((int)shTable[i].sh_type != type) continue;

        elfFile.seekg(shTable[i].sh_offset);
        if (elfFile.fail()) return -1;

        data.resize(shTable[i].sh_size);
        if (!elfFile.read(data.data(), shTable[i].sh_size)) return -1;

        return 0;
    }
    return -2;
}

static bool symCompare(Elf64_Sym a, Elf64_Sym b) {
    return (a.st_value < b.st_value);
}

static int readSymTab(ifstream& elfFile, int sort, vector<Elf64_Sym>& data) {
    int ret, numElems;
    Elf64_Sym* buf;
    vector<char> secData;

    ret = readSectionByType(elfFile, SHT_SYMTAB, secData);
    if (ret) return ret;

    buf = (Elf64_Sym*)secData.data();
    numElems = (secData.size() / sizeof(Elf64_Sym));
    data.assign(buf, buf + numElems);

    if (sort) std::sort(data.begin(), data.end(), symCompare);
    return 0;
}

static enum bpf_prog_type getSectionType(string& name) {
    for (auto& snt : sectionNameTypes)
        if (StartsWith(name, snt.name)) return snt.type;

    return BPF_PROG_TYPE_UNSPEC;
}

static int getSectionSymNames(ifstream& elfFile, const string& sectionName, vector<string>& names,
                              optional<unsigned> symbolType = std::nullopt) {
    int ret;
    string name;
    vector<Elf64_Sym> symtab;
    vector<Elf64_Shdr> shTable;

    ret = readSymTab(elfFile, 1 /* sort */, symtab);
    if (ret) return ret;

    // Get index of section
    ret = readSectionHeadersAll(elfFile, shTable);
    if (ret) return ret;

    int sec_idx = -1;
    for (int i = 0; i < (int)shTable.size(); i++) {
        ret = getSymName(elfFile, shTable[i].sh_name, name);
        if (ret) return ret;

        if (!name.compare(sectionName)) {
            sec_idx = i;
            break;
        }
    }

    // No section found with matching name
    if (sec_idx == -1) {
        ALOGW("No %s section could be found in elf object", sectionName.c_str());
        return -1;
    }

    for (int i = 0; i < (int)symtab.size(); i++) {
        if (symbolType.has_value() && ELF_ST_TYPE(symtab[i].st_info) != symbolType) continue;

        if (symtab[i].st_shndx == sec_idx) {
            string s;
            ret = getSymName(elfFile, symtab[i].st_name, s);
            if (ret) return ret;
            names.push_back(s);
        }
    }

    return 0;
}

// Read all code (program) sections and their associated relocation data
static int readCodeSections(ifstream& elfFile, vector<codeSection>& cs) {
    vector<Elf64_Shdr> shTable;
    int entries, ret = 0;

    ret = readSectionHeadersAll(elfFile, shTable);
    if (ret) return ret;
    entries = shTable.size();

    vector<struct bpf_prog_def> pd;
    ret = readSectionByName("progs", elfFile, pd);
    if (ret) return ret;
    vector<string> progDefNames;
    ret = getSectionSymNames(elfFile, "progs", progDefNames);
    if (!pd.empty() && ret) return ret;

    for (int i = 0; i < entries; i++) {
        string name;
        codeSection cs_temp;
        cs_temp.type = BPF_PROG_TYPE_UNSPEC;

        ret = getSymName(elfFile, shTable[i].sh_name, name);
        if (ret) return ret;

        enum bpf_prog_type ptype = getSectionType(name);

        if (ptype == BPF_PROG_TYPE_UNSPEC) continue;

        // This must be done before '/' is replaced with '_'.
        for (auto& snt : sectionNameTypes)
            if (StartsWith(name, snt.name)) cs_temp.attach_type = snt.attach_type;

        string oldName = name;

        // convert all slashes to underscores
        std::replace(name.begin(), name.end(), '/', '_');

        cs_temp.type = ptype;
        cs_temp.name = name;

        ret = readSectionByIdx(elfFile, i, cs_temp.data);
        if (ret) return ret;
        ALOGV("Loaded code section %d (%s)", i, name.c_str());

        vector<string> csSymNames;
        ret = getSectionSymNames(elfFile, oldName, csSymNames, STT_FUNC);
        if (ret || !csSymNames.size()) return ret;
        cs_temp.program_name = csSymNames[0];
        for (size_t j = 0; j < progDefNames.size(); ++j) {
            if (!progDefNames[j].compare(csSymNames[0] + "_def")) {
                cs_temp.prog_def = pd[j];
                break;
            }
        }

        // Check for rel section
        if (cs_temp.data.size() > 0 && i + 1 < entries) {
            ret = getSymName(elfFile, shTable[i + 1].sh_name, name);
            if (ret) return ret;

            if (name == (".rel" + oldName)) {
                ret = readSectionByIdx(elfFile, i + 1, cs_temp.rel_data);
                if (ret) return ret;
                ALOGV("Loaded relo section %d (%s)", i, name.c_str());
            }
        }

        if (cs_temp.data.size() > 0) {
            cs.push_back(std::move(cs_temp));
            ALOGV("Adding section %d to cs list", i);
        }
    }
    return 0;
}

static int getSymNameByIdx(ifstream& elfFile, int index, string& name) {
    vector<Elf64_Sym> symtab;
    int ret = 0;

    ret = readSymTab(elfFile, 0 /* !sort */, symtab);
    if (ret) return ret;

    if (index >= (int)symtab.size()) return -1;

    return getSymName(elfFile, symtab[index].st_name, name);
}

static bool mapMatchesExpectations(const unique_fd& fd, const string& mapName,
                                   const struct bpf_map_def& mapDef, const enum bpf_map_type type) {
    // bpfGetFd... family of functions require at minimum a 4.14 kernel,
    // so on 4.9-T kernels just pretend the map matches our expectations.
    // Additionally we'll get almost equivalent test coverage on newer devices/kernels.
    // This is because the primary failure mode we're trying to detect here
    // is either a source code misconfiguration (which is likely kernel independent)
    // or a newly introduced kernel feature/bug (which is unlikely to get backported to 4.9).
    if (!isAtLeastKernelVersion(4, 14, 0)) return true;

    if (strcmp(mapName.c_str(), mapDef.name())) abort();

    // Assuming fd is a valid Bpf Map file descriptor then
    // all the following should always succeed on a 4.14+ kernel.
    // If they somehow do fail, they'll return -1 (and set errno),
    // which should then cause (among others) a key_size mismatch.
    int fd_type = bpfGetFdMapType(fd);
    int fd_key_size = bpfGetFdKeySize(fd);
    int fd_value_size = bpfGetFdValueSize(fd);
    int fd_max_entries = bpfGetFdMaxEntries(fd);
    int fd_map_flags = bpfGetFdMapFlags(fd);

    // DEVMAPs are readonly from the bpf program side's point of view, as such
    // the kernel in kernel/bpf/devmap.c dev_map_init_map() will set the flag
    int desired_map_flags = (int)mapDef.map_flags;
    if (type == BPF_MAP_TYPE_DEVMAP || type == BPF_MAP_TYPE_DEVMAP_HASH)
        desired_map_flags |= BPF_F_RDONLY_PROG;

    // The .h file enforces that this is a power of two, and page size will
    // also always be a power of two, so this logic is actually enough to
    // force it to be a multiple of the page size, as required by the kernel.
    unsigned int desired_max_entries = mapDef.max_entries;
    if (type == BPF_MAP_TYPE_RINGBUF) {
        if (desired_max_entries < page_size) desired_max_entries = page_size;
    }

    // The following checks should *never* trigger, if one of them somehow does,
    // it probably means a bpf .o file has been changed/replaced at runtime
    // and bpfloader was manually rerun (normally it should only run *once*
    // early during the boot process).
    // Another possibility is that something is misconfigured in the code:
    // most likely a shared map is declared twice differently.
    // But such a change should never be checked into the source tree...
    if ((fd_type == type) &&
        (fd_key_size == (int)mapDef.key_size) &&
        (fd_value_size == (int)mapDef.value_size) &&
        (fd_max_entries == (int)desired_max_entries) &&
        (fd_map_flags == desired_map_flags)) {
        return true;
    }

    ALOGE("bpf map name %s mismatch: desired/found (errno: %d): "
          "type:%d/%d key:%u/%d value:%u/%d entries:%u/%d flags:%u/%d",
          mapName.c_str(), errno, type, fd_type, mapDef.key_size, fd_key_size,
          mapDef.value_size, fd_value_size, mapDef.max_entries, fd_max_entries,
          desired_map_flags, fd_map_flags);
    return false;
}

static int setBtfDatasecSize(ifstream &elfFile, struct btf *btf,
                             struct btf_type *bt) {
    const char *name = btf__name_by_offset(btf, bt->name_off);
    if (!name) {
        ALOGE("Couldn't resolve section name, errno: %d", errno);
        return -errno;
    }

    vector<char> data;
    int ret = readSectionByName(name, elfFile, data);
    if (ret) {
        ALOGE("Couldn't read section %s, ret: %d", name, ret);
        return ret;
    }
    bt->size = data.size();
    return 0;
}

static int getSymOffsetByName(ifstream &elfFile, const char *name, int *off) {
    vector<Elf64_Sym> symtab;
    int ret = readSymTab(elfFile, 1 /* sort */, symtab);
    if (ret) return ret;
    for (int i = 0; i < (int)symtab.size(); i++) {
        string s;
        ret = getSymName(elfFile, symtab[i].st_name, s);
        if (ret) continue;
        if (!strcmp(s.c_str(), name)) {
            *off = symtab[i].st_value;
            return 0;
        }
    }
    return -1;
}

static int setBtfVarOffset(ifstream &elfFile, struct btf *btf,
                           struct btf_type *datasecBt) {
    int i, vars = btf_vlen(datasecBt);
    struct btf_var_secinfo *vsi;
    const char *datasecName = btf__name_by_offset(btf, datasecBt->name_off);
    if (!datasecName) {
        ALOGE("Couldn't resolve section name, errno: %d", errno);
        return -errno;
    }

    for (i = 0, vsi = btf_var_secinfos(datasecBt); i < vars; i++, vsi++) {
        const struct btf_type *varBt = btf__type_by_id(btf, vsi->type);
        if (!varBt || !btf_is_var(varBt)) {
            ALOGE("Found non VAR kind btf_type, section: %s id: %d", datasecName,
                  vsi->type);
            return -1;
        }

        const struct btf_var *var = btf_var(varBt);
        if (var->linkage == BTF_VAR_STATIC) continue;

        const char *varName = btf__name_by_offset(btf, varBt->name_off);
        if (!varName) {
            ALOGE("Failed to resolve var name, section: %s", datasecName);
            return -1;
        }

        int off;
        int ret = getSymOffsetByName(elfFile, varName, &off);
        if (ret) {
            ALOGE("No offset found in symbol table, section: %s, var: %s, ret: %d",
                  datasecName, varName, ret);
            return ret;
        }
        vsi->offset = off;
    }
    return 0;
}

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
    ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
    ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
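// For instance, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 3) == (4 << 24) | 3: the kind
// is stored in bits 24-28 of btf_type.info, the kind_flag in bit 31, and the
// vlen (here: the number of struct members) in the low 16 bits.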

static int sanitizeBtf(struct btf *btf) {
    for (unsigned int i = 1; i < btf__type_cnt(btf); ++i) {
        struct btf_type *bt = (struct btf_type *)btf__type_by_id(btf, i);

        // Replace BTF_KIND_VAR (5.2+) with BTF_KIND_INT (4.18+)
        if (btf_is_var(bt)) {
            bt->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
            // using size = 1 is the safest choice, 4 will be too
            // big and cause kernel BTF validation failure if
            // original variable took less than 4 bytes
            bt->size = 1;
            *(int *)(bt + 1) = BTF_INT_ENC(0, 0, 8);
            continue;
        }

        // Replace BTF_KIND_FUNC_PROTO (5.0+) with BTF_KIND_ENUM (4.18+)
        if (btf_is_func_proto(bt)) {
            int vlen = btf_vlen(bt);
            bt->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
            bt->size = sizeof(__u32);  // kernel enforced
            continue;
        }

        // Replace BTF_KIND_FUNC (5.0+) with BTF_KIND_TYPEDEF (4.18+)
        if (btf_is_func(bt)) {
            bt->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
            continue;
        }

        // Replace BTF_KIND_DATASEC (5.2+) with BTF_KIND_STRUCT (4.18+)
        if (btf_is_datasec(bt)) {
            const struct btf_var_secinfo *v = btf_var_secinfos(bt);
            struct btf_member *m = btf_members(bt);
            char *name;

            name = (char *)btf__name_by_offset(btf, bt->name_off);
            while (*name) {
                if (*name == '.' || *name == '?') *name = '_';
                name++;
            }

            int vlen = btf_vlen(bt);
            bt->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
            for (int j = 0; j < vlen; j++, v++, m++) {
                // order of field assignments is important
                m->offset = v->offset * 8;
                m->type = v->type;
                // preserve variable name as member name
                const struct btf_type *vt = btf__type_by_id(btf, v->type);
                m->name_off = vt->name_off;
            }
        }
    }
    return 0;
}

static int loadBtf(ifstream &elfFile, struct btf *btf) {
    int ret;
    for (unsigned int i = 1; i < btf__type_cnt(btf); ++i) {
        struct btf_type *bt = (struct btf_type *)btf__type_by_id(btf, i);
        if (!btf_is_datasec(bt)) continue;
        ret = setBtfDatasecSize(elfFile, btf, bt);
        if (ret) return ret;
        ret = setBtfVarOffset(elfFile, btf, bt);
        if (ret) return ret;
    }

    if (!isAtLeastKernelVersion(5, 10, 0)) {
        // Likely unnecessary on kernel 5.4 but untested.
        sanitizeBtf(btf);
    }

    ret = btf__load_into_kernel(btf);
    if (ret) {
        if (errno != EINVAL) {
            ALOGE("btf__load_into_kernel failed, errno: %d", errno);
            return ret;
        }
        // For BTF_KIND_FUNC, newer kernels can read the BTF_INFO_VLEN bits of
        // struct btf_type to distinguish static vs. global vs. extern
        // functions, but older kernels enforce that only the BTF_INFO_KIND bits
        // can be set. Retry with non-BTF_INFO_KIND bits zeroed out to handle
        // this case.
        for (unsigned int i = 1; i < btf__type_cnt(btf); ++i) {
            struct btf_type *bt = (struct btf_type *)btf__type_by_id(btf, i);
            if (btf_is_func(bt)) {
                bt->info = (BTF_INFO_KIND(bt->info)) << 24;
            }
        }
        ret = btf__load_into_kernel(btf);
        if (ret) {
            ALOGE("btf__load_into_kernel retry failed, errno: %d", errno);
            return ret;
        }
    }
    return 0;
}

int getKeyValueTids(const struct btf *btf, const char *mapName,
                    uint32_t expectedKeySize, uint32_t expectedValueSize,
                    uint32_t *keyTypeId, uint32_t *valueTypeId) {
    const struct btf_type *kvBt;
    const struct btf_member *key, *value;
    const size_t max_name = 256;
    char kvTypeName[max_name];
    int64_t keySize, valueSize;
    int32_t kvId;

    if (snprintf(kvTypeName, max_name, "____btf_map_%s", mapName) >= (int)max_name) {
        ALOGE("____btf_map_%s is too long", mapName);
        return -1;
    }

    kvId = btf__find_by_name(btf, kvTypeName);
    if (kvId < 0) {
        ALOGE("section not found, map: %s typeName: %s", mapName, kvTypeName);
        return -1;
    }

    kvBt = btf__type_by_id(btf, kvId);
    if (!kvBt) {
        ALOGE("Couldn't find BTF type, map: %s id: %u", mapName, kvId);
        return -1;
    }

    if (!btf_is_struct(kvBt) || btf_vlen(kvBt) < 2) {
        ALOGE("Non Struct kind or invalid vlen, map: %s id: %u", mapName, kvId);
        return -1;
    }

    key = btf_members(kvBt);
    value = key + 1;

    keySize = btf__resolve_size(btf, key->type);
    if (keySize < 0) {
        ALOGE("Couldn't get key size, map: %s errno: %d", mapName, errno);
        return -1;
    }

    valueSize = btf__resolve_size(btf, value->type);
    if (valueSize < 0) {
        ALOGE("Couldn't get value size, map: %s errno: %d", mapName, errno);
        return -1;
    }

    if (expectedKeySize != keySize || expectedValueSize != valueSize) {
        ALOGE("Key value size mismatch, map: %s key size: %d expected key size: "
              "%d value size: %d expected value size: %d",
              mapName, (uint32_t)keySize, expectedKeySize, (uint32_t)valueSize,
              expectedValueSize);
        return -1;
    }

    *keyTypeId = key->type;
    *valueTypeId = value->type;

    return 0;
}
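// Note: "____btf_map_<name>" is the key/value annotation convention emitted by
// bpf_helpers-style headers. For a hypothetical map "cookie_tag_map" the bpf
// program carries "struct ____btf_map_cookie_tag_map { KeyType key; ValueType value; };",
// and the BTF type ids of its two members are what createMaps() passes to
// BPF_MAP_CREATE as btf_key_type_id / btf_value_type_id.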

static bool isBtfSupported(enum bpf_map_type type) {
    return type != BPF_MAP_TYPE_DEVMAP_HASH && type != BPF_MAP_TYPE_RINGBUF;
}

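// Pin a map fd into the bpf filesystem. When the map definition supplies a
// separate create_location, the fd is first pinned there and then moved to its
// final pin_location with renameat2(RENAME_NOREPLACE), which stages the pin at
// a temporary path and refuses to clobber anything already pinned at the final
// path; ownership and mode of the final pin are then adjusted.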
static int pinMap(const borrowed_fd& fd, const struct bpf_map_def& mapDef) {
    int ret;
    if (mapDef.create_location[0]) {
        ret = bpfFdPin(fd, mapDef.create_location);
        if (ret) {
            const int err = errno;
            ALOGE("create %s -> %d [%d:%s]", mapDef.create_location, ret, err, strerror(err));
            return -err;
        }
        ret = renameat2(AT_FDCWD, mapDef.create_location,
                        AT_FDCWD, mapDef.pin_location, RENAME_NOREPLACE);
        if (ret) {
            const int err = errno;
            ALOGE("rename %s %s -> %d [%d:%s]", mapDef.create_location, mapDef.pin_location, ret,
                  err, strerror(err));
            return -err;
        }
    } else {
        ret = bpfFdPin(fd, mapDef.pin_location);
        if (ret) {
            const int err = errno;
            ALOGE("pin %s -> %d [%d:%s]", mapDef.pin_location, ret, err, strerror(err));
            return -err;
        }
    }
    ret = chmod(mapDef.pin_location, mapDef.mode);
    if (ret) {
        const int err = errno;
        ALOGE("chmod(%s, 0%o) = %d [%d:%s]", mapDef.pin_location, mapDef.mode, ret, err,
              strerror(err));
        return -err;
    }
    ret = chown(mapDef.pin_location, (uid_t)mapDef.uid, (gid_t)mapDef.gid);
    if (ret) {
        const int err = errno;
        ALOGE("chown(%s, %u, %u) = %d [%d:%s]", mapDef.pin_location, mapDef.uid, mapDef.gid,
              ret, err, strerror(err));
        return -err;
    }

    if (isAtLeastKernelVersion(4, 14, 0)) {
        int mapId = bpfGetFdMapId(fd);
        if (mapId == -1) {
            const int err = errno;
            ALOGE("bpfGetFdMapId failed, errno: %d", err);
            return -err;
        }
        ALOGI("map %s id %d", mapDef.pin_location, mapId);
    }
    return 0;
}

static int readMapNames(ifstream& elfFile, vector<string>& mapNames) {
    int ret = getSectionSymNames(elfFile, ".android_maps", mapNames);
    if (ret) return ret;

    const string suffix = "_def";
    for (string& name : mapNames) {
        if (EndsWith(name, suffix)) {
            name.erase(name.length() - suffix.length());
        } else {
            ALOGE("Failed to get map names, invalid symbol in .android_maps: %s", name.c_str());
            return 1;
        }
    }
    return 0;
}
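// Illustrative example (hypothetical map name): a map declared via the
// DEFINE_BPF_MAP-style macros appears in .android_maps as the symbol
// "gTetherStatsMap_def"; readMapNames() strips the "_def" suffix to recover the
// map name "gTetherStatsMap" used for map creation, pinning and relocations.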

static bool isMapTypeSupported(enum bpf_map_type type) {
    if (type == BPF_MAP_TYPE_LPM_TRIE && !isAtLeastKernelVersion(4, 14, 0)) {
        // On Linux Kernels older than 4.14 this map type doesn't exist - autoskip.
        return false;
    }
    return true;
}

static enum bpf_map_type sanitizeMapType(enum bpf_map_type type) {
    if (type == BPF_MAP_TYPE_DEVMAP && !isAtLeastKernelVersion(4, 14, 0)) {
        // On Linux Kernels older than 4.14 this map type doesn't exist, but it can kind
        // of be approximated: ARRAY has the same userspace api, though it is not usable
        // by the same ebpf programs. However, that's okay because the bpf_redirect_map()
        // helper doesn't exist on 4.9-T anyway (so the bpf program would fail to load,
        // and thus needs to be tagged as 4.14+ either way), so there's nothing useful you
        // could do with a DEVMAP anyway (that isn't already provided by an ARRAY)...
        // Hence using an ARRAY instead of a DEVMAP simply makes life easier for userspace.
        return BPF_MAP_TYPE_ARRAY;
    }
    if (type == BPF_MAP_TYPE_DEVMAP_HASH && !isAtLeastKernelVersion(5, 4, 0)) {
        // On Linux Kernels older than 5.4 this map type doesn't exist, but it can kind
        // of be approximated: HASH has the same userspace visible api.
        // However it cannot be used by ebpf programs in the same way.
        // Since bpf_redirect_map() only requires 4.14, a program using a DEVMAP_HASH map
        // would fail to load (due to trying to redirect to a HASH instead of DEVMAP_HASH).
        // One must thus tag any BPF_MAP_TYPE_DEVMAP_HASH + bpf_redirect_map() using
        // programs as being 5.4+...
        return BPF_MAP_TYPE_HASH;
    }
    // No sanitization is required.
    return type;
}

static int createMaps(ifstream& elfFile, vector<unique_fd>& mapFds,
                      const unsigned int bpfloader_ver) {
    int ret;
    vector<char> btfData;
    vector<struct bpf_map_def> md;
    vector<string> mapNames;

    ret = readSectionByName(".android_maps", elfFile, md);
    if (ret == -2) return 0;  // no maps to read
    if (ret) return ret;

    ret = readMapNames(elfFile, mapNames);
    if (ret) return ret;

    struct btf *btf = NULL;
    auto btfGuard = base::make_scope_guard([&btf] { if (btf) btf__free(btf); });
    if (isAtLeastKernelVersion(4, 19, 0)) {
        // On Linux Kernels older than 4.18 BPF_BTF_LOAD command doesn't exist.
        ret = readSectionByName(".BTF", elfFile, btfData);
        if (ret) {
            ALOGE("Failed to read .BTF section, ret:%d", ret);
            return ret;
        }
        btf = btf__new(btfData.data(), btfData.size());
        if (btf == NULL) {
            ALOGE("btf__new failed, errno: %d", errno);
            return -errno;
        }

        ret = loadBtf(elfFile, btf);
        if (ret) return ret;
    }

    unsigned kvers = kernelVersion();

    for (int i = 0; i < (int)mapNames.size(); i++) {
        if (bpfloader_ver < md[i].bpfloader_min_ver) {
            ALOGD("skipping map %s which requires bpfloader min ver 0x%05x", mapNames[i].c_str(),
                  md[i].bpfloader_min_ver);
            mapFds.push_back(unique_fd());
            continue;
        }

        if (bpfloader_ver >= md[i].bpfloader_max_ver) {
            ALOGD("skipping map %s which requires bpfloader max ver 0x%05x", mapNames[i].c_str(),
                  md[i].bpfloader_max_ver);
            mapFds.push_back(unique_fd());
            continue;
        }

        if (kvers < md[i].min_kver) {
            ALOGD("skipping map %s which requires kernel version 0x%x >= 0x%x",
                  mapNames[i].c_str(), kvers, md[i].min_kver);
            mapFds.push_back(unique_fd());
            continue;
        }

        if (kvers >= md[i].max_kver) {
            ALOGD("skipping map %s which requires kernel version 0x%x < 0x%x",
                  mapNames[i].c_str(), kvers, md[i].max_kver);
            mapFds.push_back(unique_fd());
            continue;
        }

        if (!isMapTypeSupported(md[i].type)) {
            ALOGD("skipping unsupported map type(%d): %s", md[i].type, mapNames[i].c_str());
            mapFds.push_back(unique_fd());
            continue;
        }
        enum bpf_map_type type = sanitizeMapType(md[i].type);

        // The .h file enforces that this is a power of two, and page size will
        // also always be a power of two, so this logic is actually enough to
        // force it to be a multiple of the page size, as required by the kernel.
        unsigned int max_entries = md[i].max_entries;
        if (type == BPF_MAP_TYPE_RINGBUF) {
            if (max_entries < page_size) max_entries = page_size;
        }

        unique_fd fd;
        int saved_errno;

        if (access(md[i].pin_location, F_OK) == 0) {
            fd.reset(mapRetrieveRO(md[i].pin_location));
            saved_errno = errno;
            ALOGD("bpf_create_map reusing map %s, ret: %d", mapNames[i].c_str(), fd.get());
            abort();
        } else {
            union bpf_attr req = {
                .map_type = type,
                .key_size = md[i].key_size,
                .value_size = md[i].value_size,
                .max_entries = max_entries,
                .map_flags = md[i].map_flags,
            };
            if (isAtLeastKernelVersion(4, 15, 0))
                strlcpy(req.map_name, mapNames[i].c_str(), sizeof(req.map_name));

            bool haveBtf = btf && isBtfSupported(type);
            if (haveBtf) {
                uint32_t kTid, vTid;
                ret = getKeyValueTids(btf, mapNames[i].c_str(), md[i].key_size,
                                      md[i].value_size, &kTid, &vTid);
                if (ret) return ret;
                req.btf_fd = btf__fd(btf);
                req.btf_key_type_id = kTid;
                req.btf_value_type_id = vTid;
            }

            fd.reset(bpf(BPF_MAP_CREATE, req));
            saved_errno = errno;
            if (fd.ok()) {
                ALOGD("bpf_create_map[%s] btf:%d -> %d",
                      mapNames[i].c_str(), haveBtf, fd.get());
            } else {
                ALOGE("bpf_create_map[%s] btf:%d -> %d errno:%d",
                      mapNames[i].c_str(), haveBtf, fd.get(), saved_errno);
            }
        }

        if (!fd.ok()) return -saved_errno;

        // When reusing a pinned map, we need to check the map type/sizes/etc match, but for
        // safety (since reuse code path is rare) run these checks even if we just created it.
        // We assume failure is due to pinned map mismatch, hence the 'NOT UNIQUE' return code.
        if (!mapMatchesExpectations(fd, mapNames[i], md[i], type)) return -ENOTUNIQ;

        ret = pinMap(fd, md[i]);
        if (ret) return ret;

        mapFds.push_back(std::move(fd));
    }

    return ret;
}

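// Rewrite one map-reference instruction in place: the compiler emits each map
// load as a double-wide BPF_LD | BPF_IMM | BPF_DW instruction, and the loader
// patches its immediate with the map's fd and sets src_reg to BPF_PSEUDO_MAP_FD
// so that the kernel verifier can resolve the fd to the actual map at load time.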
static void applyRelo(void* insnsPtr, Elf64_Addr offset, int fd) {
    int insnIndex;
    struct bpf_insn *insn, *insns;

    insns = (struct bpf_insn*)(insnsPtr);

    insnIndex = offset / sizeof(struct bpf_insn);
    insn = &insns[insnIndex];

    // Occasionally might be useful for relocation debugging, but pretty spammy
    if (0) {
        ALOGV("applying relo to instruction at byte offset: %llu, "
              "insn offset %d, insn %llx",
              (unsigned long long)offset, insnIndex, *(unsigned long long*)insn);
    }

    if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
        ALOGE("invalid relo for insn %d: code 0x%x", insnIndex, insn->code);
        return;
    }

    insn->imm = fd;
    insn->src_reg = BPF_PSEUDO_MAP_FD;
}

static void applyMapRelo(ifstream& elfFile, vector<unique_fd> &mapFds, vector<codeSection>& cs) {
    vector<string> mapNames;

    int ret = readMapNames(elfFile, mapNames);
    if (ret) return;

    for (int k = 0; k != (int)cs.size(); k++) {
        Elf64_Rel* rel = (Elf64_Rel*)(cs[k].rel_data.data());
        int n_rel = cs[k].rel_data.size() / sizeof(*rel);

        for (int i = 0; i < n_rel; i++) {
            int symIndex = ELF64_R_SYM(rel[i].r_info);
            string symName;

            ret = getSymNameByIdx(elfFile, symIndex, symName);
            if (ret) return;

            // Find the map fd and apply relo
            for (int j = 0; j < (int)mapNames.size(); j++) {
                if (!mapNames[j].compare(symName)) {
                    applyRelo(cs[k].data.data(), rel[i].r_offset, mapFds[j]);
                    break;
                }
            }
        }
    }
}

static int pinProg(const borrowed_fd& fd, const struct bpf_prog_def& progDef,
                   const string& progPinLoc) {
    int ret;
    if (progDef.create_location[0]) {
        ret = bpfFdPin(fd, progDef.create_location);
        if (ret) {
            const int err = errno;
            ALOGE("create %s -> %d [%d:%s]", progDef.create_location, ret, err, strerror(err));
            return -err;
        }
        ret = renameat2(AT_FDCWD, progDef.create_location,
                        AT_FDCWD, progPinLoc.c_str(), RENAME_NOREPLACE);
        if (ret) {
            const int err = errno;
            ALOGE("rename %s %s -> %d [%d:%s]", progDef.create_location, progPinLoc.c_str(), ret,
                  err, strerror(err));
            return -err;
        }
    } else {
        ret = bpfFdPin(fd, progPinLoc.c_str());
        if (ret) {
            const int err = errno;
1089 ALOGE("create %s -> %d [%d:%s]", progPinLoc.c_str(), ret, err, strerror(err));
1090 return -err;
1091 }
1092 }
1093 if (chmod(progPinLoc.c_str(), 0440)) {
1094 const int err = errno;
1095 ALOGE("chmod %s 0440 -> [%d:%s]", progPinLoc.c_str(), err, strerror(err));
1096 return -err;
1097 }
1098 if (chown(progPinLoc.c_str(), (uid_t)progDef.uid,
1099 (gid_t)progDef.gid)) {
1100 const int err = errno;
1101 ALOGE("chown %s %d %d -> [%d:%s]", progPinLoc.c_str(), progDef.uid,
1102 progDef.gid, err, strerror(err));
1103 return -err;
1104 }
1105 return 0;
1106}
1107
Motomu Utsumi52a3ba72025-07-25 10:41:53 +09001108static int validateProg(const borrowed_fd& fd, string& progPinLoc,
1109 const unsigned int bpfloader_ver) {
Motomu Utsumi1d25bb32025-07-16 14:26:59 +09001110 if (!isAtLeastKernelVersion(4, 14, 0)) {
1111 return 0;
1112 }
1113 int progId = bpfGetFdProgId(fd);
1114 if (progId == -1) {
1115 const int err = errno;
1116 ALOGE("bpfGetFdProgId failed, errno: %d", err);
1117 return -err;
1118 }
1119
1120 int jitLen = bpfGetFdJitProgLen(fd);
1121 if (jitLen == -1) {
1122 const int err = errno;
1123 ALOGE("bpfGetFdJitProgLen failed, ret: %d", err);
1124 return -err;
1125 }
1126
1127 int xlatLen = bpfGetFdXlatProgLen(fd);
1128 if (xlatLen == -1) {
1129 const int err = errno;
1130 ALOGE("bpfGetFdXlatProgLen failed, ret: %d", err);
1131 return -err;
1132 }
1133 ALOGI("prog %s id %d len jit:%d xlat:%d", progPinLoc.c_str(), progId, jitLen, xlatLen);
1134
1135 if (!jitLen && bpfloader_ver >= BPFLOADER_MAINLINE_25Q2_VERSION) {
1136 ALOGE("Kernel eBPF JIT failure for %s", progPinLoc.c_str());
1137 return -ENOTSUP;
1138 }
1139 return 0;
1140}
1141
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001142static int loadCodeSections(const char* elfPath, vector<codeSection>& cs, const string& license,
Maciej Żenczykowski05da6b02025-07-28 15:34:43 -07001143 const unsigned int bpfloader_ver) {
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001144 unsigned kvers = kernelVersion();
1145
1146 if (!kvers) {
1147 ALOGE("unable to get kernel version");
1148 return -EINVAL;
1149 }
1150
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001151 for (int i = 0; i < (int)cs.size(); i++) {
1152 unique_fd& fd = cs[i].prog_fd;
1153 int ret;
1154 string name = cs[i].name;
1155
1156 if (!cs[i].prog_def.has_value()) {
1157 ALOGE("[%d] '%s' missing program definition! bad bpf.o build?", i, name.c_str());
1158 return -EINVAL;
1159 }
1160
1161 unsigned min_kver = cs[i].prog_def->min_kver;
1162 unsigned max_kver = cs[i].prog_def->max_kver;
1163 ALOGD("cs[%d].name:%s min_kver:%x .max_kver:%x (kvers:%x)", i, name.c_str(), min_kver,
1164 max_kver, kvers);
1165 if (kvers < min_kver) continue;
1166 if (kvers >= max_kver) continue;
1167
1168 unsigned bpfMinVer = cs[i].prog_def->bpfloader_min_ver;
1169 unsigned bpfMaxVer = cs[i].prog_def->bpfloader_max_ver;
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001170
1171 ALOGD("cs[%d].name:%s requires bpfloader version [0x%05x,0x%05x)", i, name.c_str(),
1172 bpfMinVer, bpfMaxVer);
1173 if (bpfloader_ver < bpfMinVer) continue;
1174 if (bpfloader_ver >= bpfMaxVer) continue;
1175
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001176 // strip any potential $foo suffix
1177 // this can be used to provide duplicate programs
1178 // conditionally loaded based on running kernel version
1179 name = name.substr(0, name.find_last_of('$'));
1180
1181 bool reuse = false;
Maciej Żenczykowskiae1a9ff2025-07-30 17:30:45 -07001182 string progPinLoc = string(cs[i].prog_def->pin_prefix) + name;
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001183 if (access(progPinLoc.c_str(), F_OK) == 0) {
1184 fd.reset(retrieveProgram(progPinLoc.c_str()));
1185 ALOGD("New bpf prog load reusing prog %s, ret: %d (%s)", progPinLoc.c_str(), fd.get(),
Maciej Żenczykowski37ba9392025-02-13 16:00:26 -08001186 !fd.ok() ? std::strerror(errno) : "ok");
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001187 reuse = true;
1188 } else {
Maciej Żenczykowskiea1d8f62024-09-05 09:38:14 -07001189 static char log_buf[1 << 20]; // 1 MiB logging buffer
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001190
1191 union bpf_attr req = {
1192 .prog_type = cs[i].type,
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001193 .insn_cnt = static_cast<__u32>(cs[i].data.size() / sizeof(struct bpf_insn)),
Maciej Żenczykowski52be6a82024-08-26 17:18:39 -07001194 .insns = ptr_to_u64(cs[i].data.data()),
1195 .license = ptr_to_u64(license.c_str()),
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001196 .log_level = 1,
Maciej Żenczykowskiea1d8f62024-09-05 09:38:14 -07001197 .log_size = sizeof(log_buf),
1198 .log_buf = ptr_to_u64(log_buf),
Maciej Żenczykowski346831c2024-08-12 17:49:10 +00001199 .expected_attach_type = cs[i].attach_type,
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001200 };
1201 if (isAtLeastKernelVersion(4, 15, 0))
1202 strlcpy(req.prog_name, cs[i].name.c_str(), sizeof(req.prog_name));
1203 fd.reset(bpf(BPF_PROG_LOAD, req));
1204
Maciej Żenczykowskiea1d8f62024-09-05 09:38:14 -07001205 // Kernel should have NULL terminated the log buffer, but force it anyway for safety
1206 log_buf[sizeof(log_buf) - 1] = 0;
1207
1208 // Strip out final newline if present
1209 int log_chars = strlen(log_buf);
1210 if (log_chars && log_buf[log_chars - 1] == '\n') log_buf[--log_chars] = 0;
1211
1212 bool log_oneline = !strchr(log_buf, '\n');
1213
1214 ALOGD("BPF_PROG_LOAD call for %s (%s) returned '%s' fd: %d (%s)", elfPath,
1215 cs[i].name.c_str(), log_oneline ? log_buf : "{multiline}",
Maciej Żenczykowski37ba9392025-02-13 16:00:26 -08001216 fd.get(), !fd.ok() ? std::strerror(errno) : "ok");
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001217
1218 if (!fd.ok()) {
Maciej Żenczykowskiea1d8f62024-09-05 09:38:14 -07001219 // kernel NULL terminates log_buf, so this checks for non-empty string
Maciej Żenczykowskif1259922025-07-15 14:57:23 -07001220 if (log_buf[0] && !isUser()) {
Maciej Żenczykowskiea1d8f62024-09-05 09:38:14 -07001221 vector<string> lines = Split(log_buf, "\n");
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001222
Maciej Żenczykowski9f8b17e2024-08-29 12:07:35 -07001223 ALOGW("BPF_PROG_LOAD - BEGIN log_buf contents:");
1224 for (const auto& line : lines) ALOGW("%s", line.c_str());
1225 ALOGW("BPF_PROG_LOAD - END log_buf contents.");
1226 }
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001227
1228 if (cs[i].prog_def->optional) {
Maciej Żenczykowskibfc0b612024-08-28 17:45:25 -07001229 ALOGW("failed program %s is marked optional - continuing...",
1230 cs[i].name.c_str());
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001231 continue;
1232 }
Maciej Żenczykowskibfc0b612024-08-28 17:45:25 -07001233 ALOGE("non-optional program %s failed to load.", cs[i].name.c_str());
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001234 }
1235 }
1236
1237 if (!fd.ok()) return fd.get();
1238
1239 if (!reuse) {
Maciej Żenczykowskif6dfae82025-07-29 16:51:37 -07001240 ret = pinProg(fd, cs[i].prog_def.value(), progPinLoc);
Motomu Utsumi597f3af2025-07-16 14:23:46 +09001241 if (ret) return ret;
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001242 }
Motomu Utsumi1d25bb32025-07-16 14:26:59 +09001243 ret = validateProg(fd, progPinLoc, bpfloader_ver);
1244 if (ret) return ret;
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001245 }
1246
1247 return 0;
1248}
1249
Motomu Utsumidbada872025-07-23 12:50:34 +09001250static int prepareLoadMaps(const struct bpf_object* obj, const vector<struct bpf_map_def>& md,
1251 const vector<string>& mapNames, const unsigned int bpfloader_ver) {
Motomu Utsumif1b1afb2025-07-23 12:10:52 +09001252 unsigned kvers = kernelVersion();
1253
1254 for (int i = 0; i < (int)mapNames.size(); i++) {
1255 struct bpf_map* m = bpf_object__find_map_by_name(obj, mapNames[i].c_str());
1256 if (!m) {
1257 ALOGE("bpf_object does not contain map: %s", mapNames[i].c_str());
1258 return -1;
1259 }
1260
1261 if (bpfloader_ver < md[i].bpfloader_min_ver || bpfloader_ver >= md[i].bpfloader_max_ver) {
1262 ALOGD("skipping map %s: bpfloader 0x%05x is outside required range [0x%05x, 0x%05x)",
1263 mapNames[i].c_str(), bpfloader_ver,
1264 md[i].bpfloader_min_ver, md[i].bpfloader_max_ver);
1265 bpf_map__set_autocreate(m, false);
1266 continue;
1267 }
1268
1269 if (kvers < md[i].min_kver || kvers >= md[i].max_kver) {
1270 ALOGD("skipping map %s: kernel version 0x%x is outside required range [0x%x, 0x%x)",
1271 mapNames[i].c_str(), kvers, md[i].min_kver, md[i].max_kver);
1272 bpf_map__set_autocreate(m, false);
1273 continue;
1274 }
1275
1276 if (!isMapTypeSupported(md[i].type)) {
1277 ALOGD("skipping unsupported map type(%d): %s", md[i].type, mapNames[i].c_str());
1278 bpf_map__set_autocreate(m, false);
1279 continue;
1280 }
1281
1282 bpf_map__set_type(m, sanitizeMapType(md[i].type));
1283 bpf_map__set_map_flags(m, md[i].map_flags);
1284 }
1285 return 0;
1286}
1287
Motomu Utsumidbada872025-07-23 12:50:34 +09001288static int prepareLoadProgs(const struct bpf_object* obj, const vector<codeSection>& cs,
1289 const unsigned int bpfloader_ver) {
Motomu Utsumi3cdcc472025-07-23 12:22:14 +09001290 unsigned kvers = kernelVersion();
1291
1292 for (int i = 0; i < (int)cs.size(); i++) {
1293 string name = cs[i].name;
1294 if (!cs[i].prog_def.has_value()) {
1295 ALOGE("[%d] '%s' missing program definition! bad bpf.o build?", i, name.c_str());
1296 return -EINVAL;
1297 }
1298 string program_name = cs[i].program_name;
1299 struct bpf_program* prog = bpf_object__find_program_by_name(obj, program_name.c_str());
1300 if (!prog) {
1301 ALOGE("bpf_object does not contain program: %s", cs[i].program_name.c_str());
1302 return -1;
1303 }
1304
1305 unsigned min_kver = cs[i].prog_def->min_kver;
1306 unsigned max_kver = cs[i].prog_def->max_kver;
1307 if (kvers < min_kver || kvers >= max_kver) {
1308 ALOGD("skipping prog %s: kernel version 0x%x is outside required range [0x%x, 0x%x)",
1309 name.c_str(), kvers, min_kver, max_kver);
1310 bpf_program__set_autoload(prog, false);
1311 continue;
1312 }
1313
1314 unsigned bpfMinVer = cs[i].prog_def->bpfloader_min_ver;
1315 unsigned bpfMaxVer = cs[i].prog_def->bpfloader_max_ver;
1316 if (bpfloader_ver < bpfMinVer || bpfloader_ver >= bpfMaxVer) {
1317 ALOGD("skipping prog %s: bpfloader 0x%05x is outside required range [0x%05x, 0x%05x)",
1318 name.c_str(), bpfloader_ver, bpfMinVer, bpfMaxVer);
1319 bpf_program__set_autoload(prog, false);
1320 continue;
1321 }
1322
1323 if (cs[i].prog_def->optional) {
1324 // TODO: Support optional programs in the libbpf loading path
1325 ALOGE("Optional program %s is not supported by the libbpf loader", name.c_str());
1326 return -1;
1327 }
1328
1329 bpf_program__set_type(prog, cs[i].type);
1330 bpf_program__set_expected_attach_type(prog, cs[i].attach_type);
1331 }
1332 return 0;
1333}
1334
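// Pins every map that libbpf actually created (ie. autocreate still true) to
// its pin_location; reusing an already-pinned map is not supported on this path.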
Maciej Żenczykowski3330faf2025-07-30 17:35:57 -07001335static int pinMaps(const struct bpf_object* obj,
Maciej Żenczykowski7ce493d2025-07-28 15:13:07 -07001336 const vector<struct bpf_map_def>& md, const vector<string>& mapNames) {
Motomu Utsumicd5bdf52025-07-23 12:32:08 +09001337 int ret;
Motomu Utsumicd5bdf52025-07-23 12:32:08 +09001338
1339 for (int i = 0; i < (int)mapNames.size(); i++) {
1340 struct bpf_map* m = bpf_object__find_map_by_name(obj, mapNames[i].c_str());
1341 if (!m) {
1342 ALOGE("bpf_object does not contain map: %s", mapNames[i].c_str());
1343 return -1;
1344 }
1345 // This map was skipped
1346 if (!bpf_map__autocreate(m)) continue;
1347
Maciej Żenczykowski47e2ba02025-07-30 18:21:48 -07001348 if (access(md[i].pin_location, F_OK) == 0) {
Motomu Utsumicd5bdf52025-07-23 12:32:08 +09001349 ALOGE("Reusing an existing map is not supported: %s", mapNames[i].c_str());
1350 return -1;
1351 }
1352
Maciej Żenczykowski47e2ba02025-07-30 18:21:48 -07001353 ret = pinMap(bpf_map__fd(m), md[i]);
Motomu Utsumicd5bdf52025-07-23 12:32:08 +09001354 if (ret) return ret;
1355 }
1356 return 0;
1357}
1358
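// Pins and validates every program that libbpf actually loaded (ie. autoload
// still true), unless a higher priority variant already owns the pin path.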
Maciej Żenczykowski3330faf2025-07-30 17:35:57 -07001359 static int pinProgs(const struct bpf_object* obj,
Maciej Żenczykowskif8e88362025-07-28 15:33:00 -07001360 const vector<codeSection>& cs, const unsigned int bpfloader_ver) {
Motomu Utsumi5dbe6562025-07-23 12:45:40 +09001361 int ret;
Motomu Utsumi5dbe6562025-07-23 12:45:40 +09001362
1363 for (int i = 0; i < (int)cs.size(); i++) {
1364 string program_name = cs[i].program_name;
1365 struct bpf_program* prog = bpf_object__find_program_by_name(obj, program_name.c_str());
1366 if (!prog) {
1367 ALOGE("bpf_object does not contain program: %s", program_name.c_str());
1368 return -1;
1369 }
1370 // This program was skipped
1371 if (!bpf_program__autoload(prog)) continue;
1372
1373 string name = cs[i].name;
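// Strip the optional '$'-suffix (used to distinguish alternative variants of
// the same program) so all variants share a single pin location.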
1374 name = name.substr(0, name.find_last_of('$'));
Maciej Żenczykowskiae1a9ff2025-07-30 17:30:45 -07001375 string progPinLoc = string(cs[i].prog_def->pin_prefix) + name;
Motomu Utsumi5dbe6562025-07-23 12:45:40 +09001376 if (access(progPinLoc.c_str(), F_OK) == 0) {
1377 // TODO: Skip loading lower priority program
1378 ALOGI("Higher priority program is already pinned, skip pinning %s", cs[i].name.c_str());
1379 continue;
1380 }
1381
1382 int fd = bpf_program__fd(prog);
Maciej Żenczykowskif6dfae82025-07-29 16:51:37 -07001383 ret = pinProg(fd, cs[i].prog_def.value(), progPinLoc);
Motomu Utsumi5dbe6562025-07-23 12:45:40 +09001384 if (ret) return ret;
1385 ret = validateProg(fd, progPinLoc, bpfloader_ver);
1386 if (ret) return ret;
1387 }
1388 return 0;
1389}
1390
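// libbpf-based loading path: the ELF is still parsed with the legacy readers to
// extract the Android-specific .android_maps and code-section metadata, but map
// creation, relocation and program loading are delegated to libbpf, after which
// the resulting maps and programs are pinned and validated.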
Maciej Żenczykowskiff3b4182025-07-28 15:02:07 -07001391static int loadProgByLibbpf(const char* const elfPath, const unsigned int bpfloader_ver) {
Motomu Utsumidbada872025-07-23 12:50:34 +09001392 int ret;
1393 vector<string> mapNames;
1394 vector<struct bpf_map_def> md;
1395 vector<codeSection> cs;
1396
1397 ifstream elfFile(elfPath, ios::in | ios::binary);
1398 if (!elfFile.is_open()) return -1;
1399
1400 LIBBPF_OPTS(bpf_object_open_opts, opts,
1401 .bpf_token_path = "",
1402 );
1403 struct bpf_object* obj = bpf_object__open_file(elfPath, &opts);
1404 if (!obj) return -1;
1405 auto objGuard = base::make_scope_guard([&obj] { bpf_object__close(obj); });
1406
1407 ret = readSectionByName(".android_maps", elfFile, md);
1408 if (ret) return ret;
1409
1410 ret = readMapNames(elfFile, mapNames);
1411 if (ret) return ret;
1412
1413 ret = prepareLoadMaps(obj, md, mapNames, bpfloader_ver);
1414 if (ret) return ret;
1415
1416 ret = readCodeSections(elfFile, cs);
1417 if (ret && ret != -ENOENT) return ret;
1418
1419 ret = prepareLoadProgs(obj, cs, bpfloader_ver);
1420 if (ret) return ret;
1421
1422 ret = bpf_object__load(obj);
1423 if (ret) return ret;
1424
Maciej Żenczykowski3330faf2025-07-30 17:35:57 -07001425 ret = pinMaps(obj, md, mapNames);
Motomu Utsumidbada872025-07-23 12:50:34 +09001426 if (ret) return ret;
1427
Maciej Żenczykowski3330faf2025-07-30 17:35:57 -07001428 ret = pinProgs(obj, cs, bpfloader_ver);
Motomu Utsumidbada872025-07-23 12:50:34 +09001429 if (ret) return ret;
1430
1431 return 0;
1432}
1433
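// Legacy loading path: parses the ELF itself, creates maps and loads programs
// directly via the bpf() syscall wrappers, and pins them as it goes.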
Maciej Żenczykowski12751c52025-07-28 15:06:10 -07001434int loadProg(const char* const elfPath, const unsigned int bpfloader_ver) {
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001435 vector<char> license;
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001436 vector<codeSection> cs;
1437 vector<unique_fd> mapFds;
1438 int ret;
1439
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001440 ifstream elfFile(elfPath, ios::in | ios::binary);
1441 if (!elfFile.is_open()) return -1;
1442
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001443 ret = readSectionByName("license", elfFile, license);
1444 if (ret) {
1445 ALOGE("Couldn't find license in %s", elfPath);
1446 return ret;
1447 } else {
Maciej Żenczykowski3a085152024-09-18 23:45:52 +00001448 ALOGD("Loading ELF object %s with license %s",
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001449 elfPath, (char*)license.data());
1450 }
1451
Maciej Żenczykowskic4a1cae2025-07-15 14:23:51 -07001452 ALOGD("BpfLoader ver 0x%05x processing ELF object %s", bpfloader_ver, elfPath);
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001453
Maciej Żenczykowski3330faf2025-07-30 17:35:57 -07001454 ret = createMaps(elfFile, mapFds, bpfloader_ver);
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001455 if (ret) {
1456 ALOGE("Failed to create maps: (ret=%d) in %s", ret, elfPath);
1457 return ret;
1458 }
1459
1460 for (int i = 0; i < (int)mapFds.size(); i++)
1461 ALOGV("map_fd found at %d is %d in %s", i, mapFds[i].get(), elfPath);
1462
Maciej Żenczykowski1e487172024-09-05 09:27:35 -07001463 ret = readCodeSections(elfFile, cs);
Maciej Żenczykowski66893bf2025-05-06 02:59:22 -07001464 if (ret == -ENOENT) return 0;
Maciej Żenczykowski1e487172024-09-05 09:27:35 -07001465 if (ret) {
1466 ALOGE("Couldn't read all code sections in %s", elfPath);
1467 return ret;
1468 }
1469
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001470 applyMapRelo(elfFile, mapFds, cs);
1471
Maciej Żenczykowski05da6b02025-07-28 15:34:43 -07001472 ret = loadCodeSections(elfPath, cs, string(license.data()), bpfloader_ver);
Maciej Żenczykowski6e1b4252024-08-07 15:03:44 -07001473 if (ret) ALOGE("Failed to load programs, loadCodeSections ret=%d", ret);
1474
1475 return ret;
1476}
1477
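// Returns whether 'path' exists; any access() failure other than ENOENT is
// treated as fatal (most likely an selinux policy problem).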
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001478static bool exists(const char* const path) {
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001479 int v = access(path, F_OK);
Maciej Żenczykowski731acfe2024-04-30 10:09:57 +00001480 if (!v) return true;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001481 if (errno == ENOENT) return false;
1482 ALOGE("FATAL: access(%s, F_OK) -> %d [%d:%s]", path, v, errno, strerror(errno));
1483 abort(); // can only hit this if permissions (likely selinux) are screwed up
1484}
1485
Maciej Żenczykowski78fa8612024-08-26 17:22:25 -07001486#define APEXROOT "/apex/com.android.tethering"
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001487#define BPFROOT APEXROOT "/etc/bpf/mainline/"
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001488
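// Loads a single bpf object file from the mainline apex's bpf directory, using
// either the legacy in-process loader or the libbpf-based loader.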
Maciej Żenczykowski58464e62025-07-28 14:58:56 -07001489static int loadObject(const unsigned int bpfloader_ver,
Motomu Utsumie50e55d2025-07-23 12:59:23 +09001490 const char* const fname, const bool useLibbpf = false) {
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001491 string progPath = string(BPFROOT) + fname;
Maciej Żenczykowskiff3b4182025-07-28 15:02:07 -07001492 int ret = useLibbpf ? loadProgByLibbpf(progPath.c_str(), bpfloader_ver) :
Maciej Żenczykowski12751c52025-07-28 15:06:10 -07001493 loadProg(progPath.c_str(), bpfloader_ver);
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001494 if (ret) {
Motomu Utsumie50e55d2025-07-23 12:59:23 +09001495 ALOGE("Failed to load object: %s, ret: %s, libbpf: %d",
1496 progPath.c_str(), std::strerror(-ret), useLibbpf);
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001497 return 1;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001498 }
Motomu Utsumie50e55d2025-07-23 12:59:23 +09001499 ALOGD("Loaded object: %s, libbpf: %d", progPath.c_str(), useLibbpf);
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001500 return 0;
1501}
1502
1503static int loadAllObjects(const unsigned int bpfloader_ver) {
1504 // S+ Tethering mainline module (network_stack): tether offload
1505 // loads under /sys/fs/bpf/tethering:
Maciej Żenczykowski58464e62025-07-28 14:58:56 -07001506 if (loadObject(bpfloader_ver, "offload.o")) return 1;
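// The third loadObject() argument selects the libbpf-based loader: on 25Q3+
// devices these objects are loaded via libbpf, otherwise via the legacy loader.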
Motomu Utsumi4efd5442025-07-29 19:37:13 +09001507 if (loadObject(bpfloader_ver, "test.o", isAtLeast25Q3)) return 1;
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001508 if (isAtLeastT) {
1509 // T+ Tethering mainline module loads under:
1510 // /sys/fs/bpf/net_shared: shared with netd & system server
Motomu Utsumif5044b72025-07-29 19:43:40 +09001511 if (loadObject(bpfloader_ver, "clatd.o", isAtLeast25Q3)) return 1;
Motomu Utsumi0062e892025-07-29 19:49:02 +09001512 if (loadObject(bpfloader_ver, "dscpPolicy.o", isAtLeast25Q3)) return 1;
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001513
1514 // /sys/fs/bpf/netd_shared: shared with netd & system server
1515 // - netutils_wrapper (for iptables xt_bpf) has access to programs
1516
1517 // WARNING: Android T+ non-updatable netd depends on both of the
1518 // 'netd_shared' & 'netd' strings for xt_bpf programs it loads
Motomu Utsumi15aedc92025-07-29 19:53:35 +09001519 if (loadObject(bpfloader_ver, "netd.o", isAtLeast25Q3)) return 1;
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07001520
1521 // /sys/fs/bpf/netd_readonly: shared with netd & system server
1522 // - netutils_wrapper has no access, netd has read only access
1523
1524 // /sys/fs/bpf/net_private: not shared, just network_stack
1525 }
1526 return 0;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001527}
1528
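// Creates a world-writable, sticky-bit bpffs pin directory (mode 01777),
// ignoring EEXIST; the umask is temporarily cleared so the mode applies as-is.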
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07001529static int createDir(const char* const dir) {
1530 mode_t prevUmask = umask(0);
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001531
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07001532 errno = 0;
1533 int ret = mkdir(dir, S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO);
1534 if (ret && errno != EEXIST) {
1535 const int err = errno;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001536 umask(prevUmask);
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07001537 ALOGE("Failed to create directory: %s, ret: %s", dir, std::strerror(err));
1538 return -err;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001539 }
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07001540
1541 umask(prevUmask);
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001542 return 0;
1543}
1544
1545// Technically 'value' doesn't need to be newline terminated, but it's best
1546// to include a newline to match 'echo "value" > /proc/sys/...foo' behaviour,
1547// which is usually how kernel devs test the actual sysctl interfaces.
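// Example usage (matching the calls in doLoad() below):
//   writeFile("/proc/sys/net/core/bpf_jit_enable", "1\n");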
Maciej Żenczykowskic9b0a832025-07-22 12:30:32 -07001548static int writeFile(const char *filename, const char *value) {
Maciej Żenczykowski8a767282024-09-04 10:56:55 -07001549 unique_fd fd(open(filename, O_WRONLY | O_CLOEXEC));
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001550 if (fd < 0) {
1551 const int err = errno;
1552 ALOGE("open('%s', O_WRONLY | O_CLOEXEC) -> %s", filename, strerror(err));
1553 return -err;
1554 }
1555 int len = strlen(value);
1556 int v = write(fd, value, len);
1557 if (v < 0) {
1558 const int err = errno;
1559 ALOGE("write('%s', '%s', %d) -> %s", filename, value, len, strerror(err));
1560 return -err;
1561 }
1562 if (v != len) {
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001563 ALOGE("write('%s', '%s', %d) -> short write [%d]", filename, value, len, v);
1564 return -EINVAL;
1565 }
1566 return 0;
1567}
1568
Maciej Żenczykowskib60599b2024-02-09 12:30:52 -08001569#define APEX_MOUNT_POINT "/apex/com.android.tethering"
Maciej Żenczykowski2fe2db52024-02-07 01:23:58 +00001570const char * const platformBpfLoader = "/system/bin/bpfloader";
Yu-Ting Tseng9b15fa02024-10-28 11:16:35 -07001571const char *const uprobestatsBpfLoader =
1572 "/apex/com.android.uprobestats/bin/uprobestatsbpfload";
Maciej Żenczykowskib60599b2024-02-09 12:30:52 -08001573
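// Logs the version of the mounted Tethering apex by scanning /proc/mounts twice:
// first to find the block device backing APEX_MOUNT_POINT, then to find that
// device's versioned '@<version>' mount point(s) and log the version suffix.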
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001574static int logTetheringApexVersion(void) {
Maciej Żenczykowskib60599b2024-02-09 12:30:52 -08001575 char * found_blockdev = NULL;
1576 FILE * f = NULL;
1577 char buf[4096];
1578
1579 f = fopen("/proc/mounts", "re");
1580 if (!f) return 1;
1581
1582 // /proc/mounts format: block_device [space] mount_point [space] other stuff... newline
1583 while (fgets(buf, sizeof(buf), f)) {
1584 char * blockdev = buf;
1585 char * space = strchr(blockdev, ' ');
1586 if (!space) continue;
1587 *space = '\0';
1588 char * mntpath = space + 1;
1589 space = strchr(mntpath, ' ');
1590 if (!space) continue;
1591 *space = '\0';
1592 if (strcmp(mntpath, APEX_MOUNT_POINT)) continue;
1593 found_blockdev = strdup(blockdev);
1594 break;
1595 }
1596 fclose(f);
1597 f = NULL;
1598
1599 if (!found_blockdev) return 2;
Maciej Żenczykowski5c057ed2024-04-30 11:59:13 +00001600 ALOGV("Found Tethering Apex mounted from blockdev %s", found_blockdev);
Maciej Żenczykowskib60599b2024-02-09 12:30:52 -08001601
1602 f = fopen("/proc/mounts", "re");
1603 if (!f) { free(found_blockdev); return 3; }
1604
1605 while (fgets(buf, sizeof(buf), f)) {
1606 char * blockdev = buf;
1607 char * space = strchr(blockdev, ' ');
1608 if (!space) continue;
1609 *space = '\0';
1610 char * mntpath = space + 1;
1611 space = strchr(mntpath, ' ');
1612 if (!space) continue;
1613 *space = '\0';
1614 if (strcmp(blockdev, found_blockdev)) continue;
1615 if (strncmp(mntpath, APEX_MOUNT_POINT "@", strlen(APEX_MOUNT_POINT "@"))) continue;
1616 char * at = strchr(mntpath, '@');
1617 if (!at) continue;
1618 char * ver = at + 1;
1619 ALOGI("Tethering APEX version %s", ver);
1620 }
1621 fclose(f);
1622 free(found_blockdev);
1623 return 0;
1624}
Maciej Żenczykowski2fe2db52024-02-07 01:23:58 +00001625
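// Device-type heuristics (GSM / TV / Wear) used below to relax some of the
// 32-bit kernel/userspace restrictions on form factors where they are known to
// be less problematic.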
Maciej Żenczykowski68eab892024-05-24 03:17:59 -07001626static bool hasGSM() {
Maciej Żenczykowski8a767282024-09-04 10:56:55 -07001627 static string ph = GetProperty("gsm.current.phone-type", "");
Maciej Żenczykowski68eab892024-05-24 03:17:59 -07001628 static bool gsm = (ph != "");
1629 static bool logged = false;
1630 if (!logged) {
1631 logged = true;
1632 ALOGI("hasGSM(gsm.current.phone-type='%s'): %s", ph.c_str(), gsm ? "true" : "false");
1633 }
1634 return gsm;
1635}
1636
1637static bool isTV() {
1638 if (hasGSM()) return false; // TVs don't do GSM
1639
Maciej Żenczykowski8a767282024-09-04 10:56:55 -07001640 static string key = GetProperty("ro.oem.key1", "");
Maciej Żenczykowski68eab892024-05-24 03:17:59 -07001641 static bool tv = StartsWith(key, "ATV00");
1642 static bool logged = false;
1643 if (!logged) {
1644 logged = true;
1645 ALOGI("isTV(ro.oem.key1='%s'): %s.", key.c_str(), tv ? "true" : "false");
1646 }
1647 return tv;
1648}
1649
Maciej Żenczykowski6e6b2092024-06-24 23:57:41 +00001650static bool isWear() {
Maciej Żenczykowski8a767282024-09-04 10:56:55 -07001651 static string wearSdkStr = GetProperty("ro.cw_build.wear_sdk.version", "");
1652 static int wearSdkInt = GetIntProperty("ro.cw_build.wear_sdk.version", 0);
1653 static string buildChars = GetProperty("ro.build.characteristics", "");
1654 static vector<string> v = Tokenize(buildChars, ",");
Maciej Żenczykowski6e6b2092024-06-24 23:57:41 +00001655 static bool watch = (std::find(v.begin(), v.end(), "watch") != v.end());
1656 static bool wear = (wearSdkInt > 0) || watch;
1657 static bool logged = false;
1658 if (!logged) {
1659 logged = true;
1660 ALOGI("isWear(ro.cw_build.wear_sdk.version=%d[%s] ro.build.characteristics='%s'): %s",
1661 wearSdkInt, wearSdkStr.c_str(), buildChars.c_str(), wear ? "true" : "false");
1662 }
1663 return wear;
1664}
1665
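// Routes libbpf's internal log messages into the Android log. Unless built with
// NETBPFLOAD_VERBOSE_LOG, only warnings are forwarded; messages are split on
// newlines so long output is not truncated by the logger.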
Motomu Utsumi712088d2025-03-18 14:52:02 +09001666static int libbpfPrint(enum libbpf_print_level lvl, const char *const formatStr,
1667 va_list argList) {
Motomu Utsumia84eb0b2025-07-23 13:26:19 +09001668#ifndef NETBPFLOAD_VERBOSE_LOG
1669 if (lvl != LIBBPF_WARN) return 0;
1670#endif
Motomu Utsumi712088d2025-03-18 14:52:02 +09001671 int32_t prio;
1672 switch (lvl) {
1673 case LIBBPF_WARN:
1674 prio = ANDROID_LOG_WARN;
1675 break;
1676 case LIBBPF_INFO:
1677 prio = ANDROID_LOG_INFO;
1678 break;
1679 case LIBBPF_DEBUG:
1680 prio = ANDROID_LOG_DEBUG;
1681 break;
1682 }
Motomu Utsumi47fcb862025-07-28 11:19:54 +09001683 if (!formatStr) {
1684 LOG_PRI(prio, LOG_TAG, "libbpf (null format string)");
1685 return 0;
1686 }
1687
1688 // Print each line to avoid being truncated.
1689 char *s = NULL;
1690 int ret = vasprintf(&s, formatStr, argList);
1691 if (ret == -1) {
1692 LOG_PRI(prio, LOG_TAG, "libbpf (format failure)");
1693 return 0;
1694 }
Motomu Utsumi712088d2025-03-18 14:52:02 +09001695 int len = strlen(s);
1696 if (len && s[len - 1] == '\n')
1697 s[len - 1] = 0;
Motomu Utsumi47fcb862025-07-28 11:19:54 +09001698 vector<string> lines = Split(s, "\n");
1699 for (const auto& line : lines) LOG_PRI(prio, LOG_TAG, "%s", line.c_str());
Motomu Utsumi712088d2025-03-18 14:52:02 +09001700 free(s);
1701 return 0;
1702}
1703
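// Main loading flow: sanity checks the platform (init scripts, kernel version,
// bitness, build type), tweaks bpf-related sysctls, creates the bpffs pin
// directories, loads all mainline bpf objects, then (when running as root)
// hands control over to uprobestatsbpfload / the platform bpfloader.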
Maciej Żenczykowski6d151ef2024-04-30 23:55:57 -07001704static int doLoad(char** argv, char * const envp[]) {
Maciej Żenczykowski0b477492025-03-04 22:12:42 -08001705 if (!isAtLeastS) {
1706 ALOGE("Impossible - not reachable on Android <S.");
1707 // for safety, we don't fail, this is a just-in-case workaround
1708 // for any possible busted 'optimized' start everything vendor init hacks on R
1709 return 0;
1710 }
Motomu Utsumi712088d2025-03-18 14:52:02 +09001711 libbpf_set_print(libbpfPrint);
Maciej Żenczykowski0b477492025-03-04 22:12:42 -08001712
Maciej Żenczykowski15f97312024-06-13 14:11:28 -07001713 const bool runningAsRoot = !getuid(); // true iff U QPR3 or V+
Maciej Żenczykowski7b95d992024-06-13 18:18:11 -07001714
Maciej Żenczykowskidb9171f2025-01-14 16:22:46 -08001715 const int first_api_level = GetIntProperty("ro.board.first_api_level", api_level);
Maciej Żenczykowski1c2187a2024-09-03 16:03:45 -07001716
Maciej Żenczykowski03ef12c2024-02-10 21:34:22 +00001717 // last in U QPR2 beta1
1718 const bool has_platform_bpfloader_rc = exists("/system/etc/init/bpfloader.rc");
1719 // first in U QPR2 beta~2
1720 const bool has_platform_netbpfload_rc = exists("/system/etc/init/netbpfload.rc");
1721
Maciej Żenczykowski62956142024-06-13 15:32:57 -07001722 // Version of Network BpfLoader depends on the Android OS version
Maciej Żenczykowski8c097782025-03-04 13:11:56 -08001723 unsigned int bpfloader_ver = BPFLOADER_MAINLINE_S_VERSION; // [42u]
Maciej Żenczykowski1a3b54f2024-06-13 15:35:46 -07001724 if (isAtLeastT) ++bpfloader_ver; // [43] BPFLOADER_MAINLINE_T_VERSION
1725 if (isAtLeastU) ++bpfloader_ver; // [44] BPFLOADER_MAINLINE_U_VERSION
1726 if (runningAsRoot) ++bpfloader_ver; // [45] BPFLOADER_MAINLINE_U_QPR3_VERSION
1727 if (isAtLeastV) ++bpfloader_ver; // [46] BPFLOADER_MAINLINE_V_VERSION
Maciej Żenczykowski98975122025-01-14 14:57:24 -08001728 if (isAtLeast25Q2) ++bpfloader_ver; // [47] BPFLOADER_MAINLINE_25Q2_VERSION
Maciej Żenczykowskic5b9f5e2025-05-06 02:35:27 -07001729 if (isAtLeast25Q3) ++bpfloader_ver; // [48] BPFLOADER_MAINLINE_25Q3_VERSION
1730 if (isAtLeast25Q4) ++bpfloader_ver; // [49] BPFLOADER_MAINLINE_25Q4_VERSION
1731 if (isAtLeast26Q1) ++bpfloader_ver; // [50] BPFLOADER_MAINLINE_26Q1_VERSION
1732 if (isAtLeast26Q2) ++bpfloader_ver; // [51] BPFLOADER_MAINLINE_26Q2_VERSION
Maciej Żenczykowski62956142024-06-13 15:32:57 -07001733
Motomu Utsumia7693582025-02-05 17:40:08 +09001734 ALOGI("NetBpfLoad v0.%u (%s) api:%d/%d kver:%07x (%s) libbpf: v%u.%u "
1735 "uid:%d rc:%d%d",
Maciej Żenczykowskidb9171f2025-01-14 16:22:46 -08001736 bpfloader_ver, argv[0], android_get_device_api_level(), api_level,
Motomu Utsumia7693582025-02-05 17:40:08 +09001737 kernelVersion(), describeArch(), libbpf_major_version(),
1738 libbpf_minor_version(), getuid(), has_platform_bpfloader_rc,
1739 has_platform_netbpfload_rc);
Maciej Żenczykowski041be522023-10-23 23:34:52 -07001740
Maciej Żenczykowski03ef12c2024-02-10 21:34:22 +00001741 if (!has_platform_bpfloader_rc && !has_platform_netbpfload_rc) {
1742 ALOGE("Unable to find platform's bpfloader & netbpfload init scripts.");
1743 return 1;
1744 }
1745
1746 if (has_platform_bpfloader_rc && has_platform_netbpfload_rc) {
1747 ALOGE("Platform has *both* bpfloader & netbpfload init scripts.");
1748 return 1;
1749 }
1750
Maciej Żenczykowskib60599b2024-02-09 12:30:52 -08001751 logTetheringApexVersion();
1752
Maciej Żenczykowskic834fdb2024-06-02 22:24:01 +00001753 // both S and T require kernel 4.9 (and eBPF support)
Maciej Żenczykowski0b477492025-03-04 22:12:42 -08001754 if (!isAtLeastKernelVersion(4, 9, 0)) {
1755 ALOGE("Android S & T require kernel 4.9.");
Maciej Żenczykowski041be522023-10-23 23:34:52 -07001756 return 1;
1757 }
1758
Maciej Żenczykowskic834fdb2024-06-02 22:24:01 +00001759 // U bumps the kernel requirement up to 4.14
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001760 if (isAtLeastU && !isAtLeastKernelVersion(4, 14, 0)) {
Maciej Żenczykowski041be522023-10-23 23:34:52 -07001761 ALOGE("Android U requires kernel 4.14.");
1762 return 1;
1763 }
1764
Maciej Żenczykowskic834fdb2024-06-02 22:24:01 +00001765 // V bumps the kernel requirement up to 4.19
1766 // see also: //system/netd/tests/kernel_test.cpp TestKernel419
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001767 if (isAtLeastV && !isAtLeastKernelVersion(4, 19, 0)) {
Maciej Żenczykowski041be522023-10-23 23:34:52 -07001768 ALOGE("Android V requires kernel 4.19.");
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001769 return 1;
1770 }
1771
Maciej Żenczykowskidb9171f2025-01-14 16:22:46 -08001772 // 25Q2 bumps the kernel requirement up to 5.4
Maciej Żenczykowski76f66b62024-09-27 02:46:00 +00001773 // see also: //system/netd/tests/kernel_test.cpp TestKernel54
Maciej Żenczykowski98975122025-01-14 14:57:24 -08001774 if (isAtLeast25Q2 && !isAtLeastKernelVersion(5, 4, 0)) {
1775 ALOGE("Android 25Q2 requires kernel 5.4.");
Maciej Żenczykowski76f66b62024-09-27 02:46:00 +00001776 return 1;
1777 }
1778
Maciej Żenczykowskiec1115e2025-05-06 04:51:09 -07001779 // 25Q4 bumps the kernel requirement up to 5.10
1780 // see also: //system/netd/tests/kernel_test.cpp TestKernel510
1781 if (isAtLeast25Q4 && !isAtLeastKernelVersion(5, 10, 0)) {
1782 ALOGE("Android 25Q4 requires kernel 5.10.");
1783 return 1;
1784 }
1785
Maciej Żenczykowskic834fdb2024-06-02 22:24:01 +00001786 // Technically already required by U, but only enforce on V+
1787 // see also: //system/netd/tests/kernel_test.cpp TestKernel64Bit
1788 if (isAtLeastV && isKernel32Bit() && isAtLeastKernelVersion(5, 16, 0)) {
1789 ALOGE("Android V+ platform with 32 bit kernel version >= 5.16.0 is unsupported");
1790 if (!isTV()) return 1;
1791 }
1792
Maciej Żenczykowski127715a2025-02-10 21:52:01 -08001793 if (isKernel32Bit() && isAtLeast25Q2) {
1794 ALOGE("Android 25Q2 requires 64 bit kernel.");
1795 return 1;
1796 }
1797
Maciej Żenczykowski9b6a9942024-09-03 16:08:35 -07001798 // 6.6 is highest version supported by Android V, so this is effectively W+ (sdk=36+)
1799 if (isKernel32Bit() && isAtLeastKernelVersion(6, 7, 0)) {
1800 ALOGE("Android platform with 32 bit kernel version >= 6.7.0 is unsupported");
1801 return 1;
1802 }
1803
Maciej Żenczykowskic834fdb2024-06-02 22:24:01 +00001804 // Various known ABI layout issues, particularly wrt. bpf and ipsec/xfrm.
1805 if (isAtLeastV && isKernel32Bit() && isX86()) {
Maciej Żenczykowski7f6a4262024-02-17 00:42:42 +00001806 ALOGE("Android V requires X86 kernel to be 64-bit.");
Maciej Żenczykowski68eab892024-05-24 03:17:59 -07001807 if (!isTV()) return 1;
Maciej Żenczykowski7f6a4262024-02-17 00:42:42 +00001808 }
1809
Maciej Żenczykowskic982a4b2024-04-25 23:04:09 -07001810 if (isAtLeastV) {
1811 bool bad = false;
1812
1813 if (!isLtsKernel()) {
Maciej Żenczykowski76f66b62024-09-27 02:46:00 +00001814 ALOGW("Android V+ only supports LTS kernels.");
Maciej Żenczykowskic982a4b2024-04-25 23:04:09 -07001815 bad = true;
1816 }
1817
1818#define REQUIRE(maj, min, sub) \
1819 if (isKernelVersion(maj, min) && !isAtLeastKernelVersion(maj, min, sub)) { \
Maciej Żenczykowski76f66b62024-09-27 02:46:00 +00001820 ALOGW("Android V+ requires %d.%d kernel to be %d.%d.%d+.", maj, min, maj, min, sub); \
Maciej Żenczykowskic982a4b2024-04-25 23:04:09 -07001821 bad = true; \
1822 }
1823
1824 REQUIRE(4, 19, 236)
1825 REQUIRE(5, 4, 186)
1826 REQUIRE(5, 10, 199)
1827 REQUIRE(5, 15, 136)
1828 REQUIRE(6, 1, 57)
1829 REQUIRE(6, 6, 0)
Maciej Żenczykowski06f38e32024-12-11 07:12:59 -08001830 REQUIRE(6, 12, 0)
Maciej Żenczykowskic982a4b2024-04-25 23:04:09 -07001831
1832#undef REQUIRE
1833
Maciej Żenczykowski4a0838c2024-06-14 20:22:20 +00001834 if (bad) {
Maciej Żenczykowskic982a4b2024-04-25 23:04:09 -07001835 ALOGE("Unsupported kernel version (%07x).", kernelVersion());
1836 }
1837 }
1838
Maciej Żenczykowski726b58f2024-09-03 15:42:46 -07001839 /* Android 14/U should only launch on 64-bit kernels
1840 * T launches on 5.10/5.15
1841 * U launches on 5.15/6.1
1842 * So >=5.16 implies isKernel64Bit()
1843 *
1844 * We thus added a test to V VTS which requires 5.16+ devices to use 64-bit kernels.
1845 *
1846 * Starting with Android V, which is the first to support a post 6.1 Linux Kernel,
1847 * we also require 64-bit userspace.
1848 *
1849 * There are various known issues with 32-bit userspace talking to various
1850 * kernel interfaces (especially CAP_NET_ADMIN ones) on a 64-bit kernel.
1851 * Some of these have userspace or kernel workarounds/hacks.
1852 * Some of them don't...
1853 * We're going to be removing the hacks.
1854 * (for example "ANDROID: xfrm: remove in_compat_syscall() checks").
1855 * Note: this check/enforcement only applies to *system* userspace code,
1856 * it does not affect unprivileged apps, the 32-on-64 compatibility
1857 * problems are AFAIK limited to various CAP_NET_ADMIN protected interfaces.
1858 *
1859 * Additionally the 32-bit kernel jit support is poor,
1860 * and 32-bit userspace on 64-bit kernel bpf ringbuffer compatibility is broken.
Lorenzo Colittid95c0c62024-12-04 15:16:15 +09001861 * Note, however, that TV and Wear devices will continue to support 32-bit userspace
1862 * on ARM64.
Maciej Żenczykowski726b58f2024-09-03 15:42:46 -07001863 */
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001864 if (isUserspace32bit() && isAtLeastKernelVersion(6, 2, 0)) {
Maciej Żenczykowski1c2187a2024-09-03 16:03:45 -07001865 // Stuff won't work reliably, but...
Lorenzo Colittid95c0c62024-12-04 15:16:15 +09001866 if (isArm() && (isTV() || isWear())) {
1867 // exempt Arm TV or Wear devices (arm32 ABI is far less problematic than x86-32)
1868 ALOGW("[Arm TV/Wear] 32-bit userspace unsupported on 6.2+ kernels.");
Maciej Żenczykowski73238632025-02-24 14:50:09 -08001869 } else if (first_api_level <= 33 /*T*/ && isArm()) {
Maciej Żenczykowski1c2187a2024-09-03 16:03:45 -07001870 // also exempt Arm devices that launched on T or earlier and took a major kernel rev upgrade
1871 // might possibly be better for them to run with a newer kernel...
1872 ALOGW("[Arm KernelUpRev] 32-bit userspace unsupported on 6.2+ kernels.");
1873 } else if (isArm()) {
1874 ALOGE("[Arm] 64-bit userspace required on 6.2+ kernels (%d).", first_api_level);
1875 return 1;
1876 } else { // x86 since RiscV cannot be 32-bit
1877 ALOGE("[x86] 64-bit userspace required on 6.2+ kernels.");
1878 return 1;
1879 }
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001880 }
1881
Maciej Żenczykowski1da19452025-05-16 11:07:51 -07001882 // Linux 6.12 was an LTS released at the end of 2024 (Nov 17),
1883 // and was first supported by Android 16 / 25Q2 (released in June 2025).
1884 // The next Linux LTS should be released near the end of 2025,
1885 // and will likely be 6.18.
1886 // Since officially Android only supports LTS, 6.13+ really means 6.18+,
1887 // and won't be supported before 2026, most likely Android 17 / 26Q2.
1888 // 6.13+ (implying 26Q2+) requires 64-bit userspace.
1889 if (isUserspace32bit() && isAtLeastKernelVersion(6, 13, 0)) {
1890 // due to previous check only reachable on Arm && (<=T kernel uprev || TV || Wear)
1891 ALOGE("64-bit userspace required on 6.13+ kernels.");
1892 return 1;
1893 }
1894
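// On 25Q2+ the platform's netbpfload.rc is expected to begin with a header
// comment encoding the release year, quarter and api version, which is sanity
// checked here. Illustrative (assumed) example of such a header line:
//   # 2025 2 36 0 0 #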
Maciej Żenczykowski25e26222025-03-20 23:25:39 -07001895 if (isAtLeast25Q2) {
1896 FILE * f = fopen("/system/etc/init/netbpfload.rc", "re");
1897 if (!f) {
1898 ALOGE("failure opening /system/etc/init/netbpfload.rc");
1899 return 1;
1900 }
1901 int y = -1, q = -1, a = -1, b = -1, c = -1;
1902 int v = fscanf(f, "# %d %d %d %d %d #", &y, &q, &a, &b, &c);
1903 ALOGI("detected %d of 5: %dQ%d api:%d.%d.%d", v, y, q, a, b, c);
1904 fclose(f);
Maciej Żenczykowskicb555722025-05-01 06:12:00 -07001905 if (v != 5) return 1;
1906 if (y < 2025 || y > 2099) return 1;
1907 if (q < 1 || q > 4) return 1;
1908 if (a < 36) return 1;
1909 if (b < 0 || b > 4) return 1;
1910 if (c < 0) return 1;
Maciej Żenczykowski25e26222025-03-20 23:25:39 -07001911 }
1912
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001913 // Ensure we can determine the Android build type.
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001914 if (!isEng() && !isUser() && !isUserdebug()) {
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001915 ALOGE("Failed to determine the build type: got %s, want 'eng', 'user', or 'userdebug'",
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001916 getBuildType().c_str());
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001917 return 1;
1918 }
1919
Maciej Żenczykowski48e476b2024-06-13 14:06:49 -07001920 if (runningAsRoot) {
1921 // Note: writing this proc file requires being root (always the case on V+)
1922
Maciej Żenczykowskif33f1282023-10-24 04:41:54 -07001923 // Linux 5.16-rc1 changed the default to 2 (disabled but changeable),
1924 // but we need 0 (enabled)
1925 // (this writeFile is known to fail on at least 4.19, but always defaults to 0 on
1926 // pre-5.13, on 5.13+ it depends on CONFIG_BPF_UNPRIV_DEFAULT_OFF)
Maciej Żenczykowskic9b0a832025-07-22 12:30:32 -07001927 if (writeFile("/proc/sys/kernel/unprivileged_bpf_disabled", "0\n") &&
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07001928 isAtLeastKernelVersion(5, 13, 0)) return 1;
Maciej Żenczykowski732a1412024-03-14 00:17:18 -07001929 }
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001930
Maciej Żenczykowski732a1412024-03-14 00:17:18 -07001931 if (isAtLeastU) {
Maciej Żenczykowski48e476b2024-06-13 14:06:49 -07001932 // Note: writing these proc files requires CAP_NET_ADMIN
1933 // and sepolicy which is only present on U+,
1934 // on Android T and earlier versions they're written from the 'load_bpf_programs'
1935 // trigger (ie. by init itself) instead.
1936
Maciej Żenczykowskif33f1282023-10-24 04:41:54 -07001937 // Enable the eBPF JIT -- but do note that on 64-bit kernels it is likely
1938 // already force enabled by the kernel config option BPF_JIT_ALWAYS_ON.
1939 // (Note: this (open) will fail with ENOENT 'No such file or directory' if
1940 // kernel does not have CONFIG_BPF_JIT=y)
1941 // BPF_JIT is required by R VINTF (which means 4.14/4.19/5.4 kernels),
1942 // but 4.14/4.19 were released with P & Q, and only 5.4 is new in R+.
Maciej Żenczykowskic9b0a832025-07-22 12:30:32 -07001943 if (writeFile("/proc/sys/net/core/bpf_jit_enable", "1\n")) return 1;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001944
Maciej Żenczykowskif33f1282023-10-24 04:41:54 -07001945 // Enable JIT kallsyms export for privileged users only
1946 // (Note: this (open) will fail with ENOENT 'No such file or directory' if
1947 // kernel does not have CONFIG_HAVE_EBPF_JIT=y)
Maciej Żenczykowskic9b0a832025-07-22 12:30:32 -07001948 if (writeFile("/proc/sys/net/core/bpf_jit_kallsyms", "1\n")) return 1;
Maciej Żenczykowskif33f1282023-10-24 04:41:54 -07001949 }
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001950
Maciej Żenczykowski18523cb2025-06-07 05:59:44 -07001951 if (runningAsRoot) { // implies U QPR3+ and kernel 4.14+
1952 // There should not be any programs or maps yet
1953 errno = 0;
1954 uint32_t progId = bpfGetNextProgId(0); // expect 0 with errno == ENOENT
1955 if (progId || errno != ENOENT) {
1956 ALOGE("bpfGetNextProgId(zero) returned %u (errno %d)", progId, errno);
1957 return 1;
1958 }
1959 errno = 0;
1960 uint32_t mapId = bpfGetNextMapId(0); // expect 0 with errno == ENOENT
1961 if (mapId || errno != ENOENT) {
1962 ALOGE("bpfGetNextMapId(zero) returned %u (errno %d)", mapId, errno);
1963 return 1;
1964 }
1965 } else if (isAtLeastKernelVersion(4, 14, 0)) { // implies S through U QPR2
1966 // bpfGetNext{Prog,Map}Id require 4.14+
1967 // furthermore since we're not running as root, we're not the initial
1968 // platform bpfloader, so there may already be some maps & programs.
1969 uint32_t mapId = 0;
1970 while (true) {
1971 errno = 0;
1972 uint32_t next = bpfGetNextMapId(mapId);
1973 if (!next && errno == ENOENT) break;
1974 if (next <= mapId) {
1975 ALOGE("bpfGetNextMapId(%u) returned %u errno %d", mapId, next, errno);
1976 return 1;
1977 }
1978 mapId = next;
1979 }
1980 // mapId is now the last map id, creating a new map should change that
1981 unique_fd map(createMap(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(int), 1, 0));
1982 errno = 0;
1983 uint32_t next = bpfGetNextMapId(mapId);
1984 if (next <= mapId) {
1985 // We should fail here on Xiaomi S 4.14.180 due to kernel uapi bug,
1986 // which causes bpfGetNextMapId to behave as bpfGetNextProgId,
1987 // and thus it should return 0 with errno == ENOENT.
1988 ALOGE("bpfGetNextMapId(final %d) returned %d errno %d", mapId, next, errno);
Maciej Żenczykowskif7eb2bf2025-06-10 01:56:48 -07001989 if (next || errno != ENOENT) return 1;
1990 if (isAtLeastT || isAtLeastKernelVersion(4, 20, 0)) return 1;
1991 // implies Android S with 4.14 or 4.19 kernel
1992 ALOGW("Enabling bpfCmdFixupIsNeeded.");
1993 bpfCmdFixupIsNeeded = true;
Maciej Żenczykowski18523cb2025-06-07 05:59:44 -07001994 }
1995 } else { // implies S/T with 4.9 kernel
1996 // nothing we can do.
1997 }
1998
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07001999 // Create all the pin subdirectories
Maciej Żenczykowski08641532025-07-30 15:22:07 -07002000 // (this must be done first to allow create_location and pin_subdir functionality,
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07002001 // which could otherwise fail with ENOENT during object pinning or renaming,
2002 // due to ordering issues)
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07002003 if (createDir("/sys/fs/bpf/tethering")) return 1;
2004 // This is technically T+ but S also needs it for the 'mainline_done' file.
2005 if (createDir("/sys/fs/bpf/netd_shared")) return 1;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07002006
Maciej Żenczykowski0b477492025-03-04 22:12:42 -08002007 if (isAtLeastT) {
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07002008 if (createDir("/sys/fs/bpf/netd_readonly")) return 1;
2009 if (createDir("/sys/fs/bpf/net_shared")) return 1;
2010 if (createDir("/sys/fs/bpf/net_private")) return 1;
2011
2012 // This one is primarily meant for triggering genfscon rules.
2013 if (createDir("/sys/fs/bpf/loader")) return 1;
Maciej Żenczykowski0b477492025-03-04 22:12:42 -08002014 }
Maciej Żenczykowskia9209da2024-02-29 02:01:20 +00002015
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07002016 // Load all ELF objects, create programs and maps, and pin them
Maciej Żenczykowski82ce2ca2025-05-14 14:49:28 -07002017 if (loadAllObjects(bpfloader_ver)) {
2018 ALOGE("=== CRITICAL FAILURE LOADING BPF PROGRAMS ===");
2019 ALOGE("If this triggers reliably, you're probably missing kernel options or patches.");
2020 ALOGE("If this triggers randomly, you might be hitting some memory allocation "
2021 "problems or startup script race.");
2022 ALOGE("--- DO NOT EXPECT SYSTEM TO BOOT SUCCESSFULLY ---");
2023 sleep(20);
2024 return 2;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07002025 }
2026
Maciej Żenczykowski83d5ad12025-06-17 00:48:44 -07002027 {
2028 // Create a trivial bpf map: a two element array [int->int]
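// Index 0 publishes whether the bpfCmdFixupIsNeeded workaround is in effect,
// and the write to index 1 probes for a known kernel bug; the map is then
// pinned as map_kernel_bugs (presumably so other components can query this).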
2029 unique_fd map(createMap(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(int), 2, 0));
2030
2031 int zero = 0;
2032 int kernel_bugs = bpfCmdFixupIsNeeded;
2033 if (writeToMapEntry(map, &zero, &kernel_bugs, BPF_ANY)) {
2034 ALOGE("Failure to write into index 0 of kernel bugs array.");
2035 return 1;
2036 }
2037
2038 int one = 1;
2039 int value = 123;
2040 if (writeToMapEntry(map, &one, &value, BPF_ANY)) {
2041 ALOGE("Critical kernel bug - failure to write into index 1 of 2 element bpf map array.");
2042 if (isAtLeastT) return 1;
2043 }
2044
2045 int ret = bpfFdPin(map, "/sys/fs/bpf/tethering/map_kernel_bugs");
2046 if (ret) {
2047 const int err = errno;
2048 ALOGE("pin -> %d [%d:%s]", ret, err, strerror(err));
2049 return -err;
2050 }
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07002051 }
2052
Maciej Żenczykowski15f97312024-06-13 14:11:28 -07002053 // leave a flag that we're done
Maciej Żenczykowski57de4bf2025-05-12 17:52:04 -07002054 if (createDir("/sys/fs/bpf/netd_shared/mainline_done")) return 1;
Maciej Żenczykowski58c18222023-10-20 14:40:16 -07002055
Maciej Żenczykowski15f97312024-06-13 14:11:28 -07002056 // platform bpfloader will only succeed when run as root
2057 if (!runningAsRoot) {
2058 // unreachable on U QPR3+ which always runs netbpfload as root
2059
2060 ALOGI("mainline done, no need to transfer control to platform bpf loader.");
2061 return 0;
Maciej Żenczykowski732a1412024-03-14 00:17:18 -07002062 }
2063
Maciej Żenczykowski15f97312024-06-13 14:11:28 -07002064 // unreachable before U QPR3
Yu-Ting Tsengcb19e1b2024-12-10 14:55:04 -08002065 if (exists(uprobestatsBpfLoader)) {
Yu-Ting Tseng9b15fa02024-10-28 11:16:35 -07002066 ALOGI("done, transferring control to uprobestatsbpfload.");
2067 const char *args[] = {
2068 uprobestatsBpfLoader,
2069 NULL,
2070 };
2071 execve(args[0], (char **)args, envp);
Yu-Ting Tsengcb19e1b2024-12-10 14:55:04 -08002072 ALOGI("unable to execute uprobestatsbpfload, transferring control to "
2073 "platform bpfloader.");
Yu-Ting Tseng9b15fa02024-10-28 11:16:35 -07002074 }
Maciej Żenczykowski15f97312024-06-13 14:11:28 -07002075
2076 // platform BpfLoader *needs* to run as root
2077 const char * args[] = { platformBpfLoader, NULL, };
2078 execve(args[0], (char**)args, envp);
2079 ALOGE("FATAL: execve('%s'): %d[%s]", platformBpfLoader, errno, strerror(errno));
2080 return 1;
Maciej Żenczykowski60c159f2023-10-02 14:54:48 -07002081}
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07002082
2083} // namespace bpf
2084} // namespace android
2085
Maciej Żenczykowski6d151ef2024-04-30 23:55:57 -07002086int main(int argc, char** argv, char * const envp[]) {
Maciej Żenczykowski0b477492025-03-04 22:12:42 -08002087 if (android::bpf::isAtLeastT) {
2088 InitLogging(argv, &KernelLogger);
2089 } else {
2090 // S lacks the sepolicy to make non-root uid KernelLogger viable
2091 InitLogging(argv);
2092 }
Maciej Żenczykowski6d151ef2024-04-30 23:55:57 -07002093
2094 if (argc == 2 && !strcmp(argv[1], "done")) {
2095 // we're being re-exec'ed from platform bpfloader to 'finalize' things
Maciej Żenczykowski8a767282024-09-04 10:56:55 -07002096 if (!SetProperty("bpf.progs_loaded", "1")) {
Maciej Żenczykowski6d151ef2024-04-30 23:55:57 -07002097 ALOGE("Failed to set bpf.progs_loaded property to 1.");
2098 return 125;
2099 }
Maciej Żenczykowski66f16292024-05-06 23:52:33 -07002100 ALOGI("success.");
Maciej Żenczykowski6d151ef2024-04-30 23:55:57 -07002101 return 0;
2102 }
2103
2104 return android::bpf::doLoad(argv, envp);
Maciej Żenczykowski75c2def2024-04-25 14:19:14 -07002105}