Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2021 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #pragma once |
| 18 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 19 | #include <android-base/unique_fd.h> |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 20 | #include <stdlib.h> |
| 21 | #include <unistd.h> |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 22 | #include <linux/bpf.h> |
| 23 | #include <linux/unistd.h> |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 24 | #include <sys/file.h> |
| 25 | |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 26 | |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 27 | namespace android { |
| 28 | namespace bpf { |
| 29 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 30 | using ::android::base::borrowed_fd; |
| 31 | |
// Converts a userspace pointer into the u64 form that bpf_attr fields expect.
inline uint64_t ptr_to_u64(const void* const x) {
    return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(x));
}
| 35 | |
/* Note: bpf_attr is a union which might have a much larger size than the anonymous struct portion
 * of it that we are using. The kernel's bpf() system call will perform a strict check to ensure
 * all unused portions are zero. It will fail with E2BIG if we don't fully zero bpf_attr.
 */
| 40 | |
// Thin wrapper over the bpf(2) syscall; 'attr' is taken by const reference and
// its address is handed to the kernel.  (Note: for some commands, e.g.
// BPF_OBJ_GET_INFO_BY_FD, the kernel still writes back through that address.)
inline int bpf(enum bpf_cmd cmd, const bpf_attr& attr) {
    const bpf_attr* const arg = &attr;
    return syscall(__NR_bpf, cmd, arg, sizeof(*arg));
}
| 44 | |
// Mutable-attr overload: for commands where the kernel writes results back
// into the passed-in attribute union (e.g. BPF_PROG_QUERY).
inline int bpf(enum bpf_cmd cmd, bpf_attr* attr) {
    return syscall(__NR_bpf, cmd, attr, sizeof(bpf_attr));
}
| 49 | |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 50 | inline int createMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size, |
| 51 | uint32_t max_entries, uint32_t map_flags) { |
| 52 | return bpf(BPF_MAP_CREATE, { |
| 53 | .map_type = map_type, |
| 54 | .key_size = key_size, |
| 55 | .value_size = value_size, |
| 56 | .max_entries = max_entries, |
| 57 | .map_flags = map_flags, |
| 58 | }); |
| 59 | } |
| 60 | |
Maciej Żenczykowski | c6e4122 | 2023-06-12 22:50:02 -0700 | [diff] [blame] | 61 | // Note: |
| 62 | // 'map_type' must be one of BPF_MAP_TYPE_{ARRAY,HASH}_OF_MAPS |
| 63 | // 'value_size' must be sizeof(u32), ie. 4 |
| 64 | // 'inner_map_fd' is basically a template specifying {map_type, key_size, value_size, max_entries, map_flags} |
| 65 | // of the inner map type (and possibly only key_size/value_size actually matter?). |
| 66 | inline int createOuterMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size, |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 67 | uint32_t max_entries, uint32_t map_flags, |
| 68 | const borrowed_fd& inner_map_fd) { |
Maciej Żenczykowski | c6e4122 | 2023-06-12 22:50:02 -0700 | [diff] [blame] | 69 | return bpf(BPF_MAP_CREATE, { |
| 70 | .map_type = map_type, |
| 71 | .key_size = key_size, |
| 72 | .value_size = value_size, |
| 73 | .max_entries = max_entries, |
| 74 | .map_flags = map_flags, |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 75 | .inner_map_fd = static_cast<__u32>(inner_map_fd.get()), |
Maciej Żenczykowski | c6e4122 | 2023-06-12 22:50:02 -0700 | [diff] [blame] | 76 | }); |
| 77 | } |
| 78 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 79 | inline int writeToMapEntry(const borrowed_fd& map_fd, const void* key, const void* value, |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 80 | uint64_t flags) { |
| 81 | return bpf(BPF_MAP_UPDATE_ELEM, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 82 | .map_fd = static_cast<__u32>(map_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 83 | .key = ptr_to_u64(key), |
| 84 | .value = ptr_to_u64(value), |
| 85 | .flags = flags, |
| 86 | }); |
| 87 | } |
| 88 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 89 | inline int findMapEntry(const borrowed_fd& map_fd, const void* key, void* value) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 90 | return bpf(BPF_MAP_LOOKUP_ELEM, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 91 | .map_fd = static_cast<__u32>(map_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 92 | .key = ptr_to_u64(key), |
| 93 | .value = ptr_to_u64(value), |
| 94 | }); |
| 95 | } |
| 96 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 97 | inline int deleteMapEntry(const borrowed_fd& map_fd, const void* key) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 98 | return bpf(BPF_MAP_DELETE_ELEM, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 99 | .map_fd = static_cast<__u32>(map_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 100 | .key = ptr_to_u64(key), |
| 101 | }); |
| 102 | } |
| 103 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 104 | inline int getNextMapKey(const borrowed_fd& map_fd, const void* key, void* next_key) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 105 | return bpf(BPF_MAP_GET_NEXT_KEY, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 106 | .map_fd = static_cast<__u32>(map_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 107 | .key = ptr_to_u64(key), |
| 108 | .next_key = ptr_to_u64(next_key), |
| 109 | }); |
| 110 | } |
| 111 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 112 | inline int getFirstMapKey(const borrowed_fd& map_fd, void* firstKey) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 113 | return getNextMapKey(map_fd, NULL, firstKey); |
| 114 | } |
| 115 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 116 | inline int bpfFdPin(const borrowed_fd& map_fd, const char* pathname) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 117 | return bpf(BPF_OBJ_PIN, { |
| 118 | .pathname = ptr_to_u64(pathname), |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 119 | .bpf_fd = static_cast<__u32>(map_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 120 | }); |
| 121 | } |
| 122 | |
| 123 | inline int bpfFdGet(const char* pathname, uint32_t flag) { |
| 124 | return bpf(BPF_OBJ_GET, { |
| 125 | .pathname = ptr_to_u64(pathname), |
| 126 | .file_flags = flag, |
| 127 | }); |
| 128 | } |
| 129 | |
// Returns the kernel-assigned id of the map behind 'map_fd' (or -1 with errno).
// Defined further down via DEFINE_BPF_GET_FD; forward-declared here because
// bpfLock() below needs it.
int bpfGetFdMapId(const borrowed_fd& map_fd);
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 131 | |
| 132 | inline int bpfLock(int fd, short type) { |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 133 | if (fd < 0) return fd; // pass any errors straight through |
Maciej Żenczykowski | 04fb386 | 2024-06-15 00:14:16 +0000 | [diff] [blame] | 134 | #ifdef BPF_MAP_LOCKLESS_FOR_TEST |
| 135 | return fd; |
| 136 | #endif |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 137 | int mapId = bpfGetFdMapId(fd); |
Maciej Żenczykowski | 4acfa1f | 2024-06-14 14:16:31 -0700 | [diff] [blame] | 138 | int saved_errno = errno; |
Maciej Żenczykowski | 4acfa1f | 2024-06-14 14:16:31 -0700 | [diff] [blame] | 139 | // 4.14+ required to fetch map id, but we don't want to call isAtLeastKernelVersion |
| 140 | if (mapId == -1 && saved_errno == EINVAL) return fd; |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 141 | if (mapId <= 0) abort(); // should not be possible |
| 142 | |
| 143 | // on __LP64__ (aka. 64-bit userspace) 'struct flock64' is the same as 'struct flock' |
| 144 | struct flock64 fl = { |
| 145 | .l_type = type, // short: F_{RD,WR,UN}LCK |
| 146 | .l_whence = SEEK_SET, // short: SEEK_{SET,CUR,END} |
| 147 | .l_start = mapId, // off_t: start offset |
| 148 | .l_len = 1, // off_t: number of bytes |
| 149 | }; |
| 150 | |
| 151 | // see: bionic/libc/bionic/fcntl.cpp: iff !__LP64__ this uses fcntl64 |
| 152 | int ret = fcntl(fd, F_OFD_SETLK, &fl); |
| 153 | if (!ret) return fd; // success |
| 154 | close(fd); |
| 155 | return ret; // most likely -1 with errno == EAGAIN, due to already held lock |
| 156 | } |
| 157 | |
Maciej Żenczykowski | 0fff839 | 2024-06-15 02:43:12 -0700 | [diff] [blame] | 158 | inline int mapRetrieveLocklessRW(const char* pathname) { |
| 159 | return bpfFdGet(pathname, 0); |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 160 | } |
Maciej Żenczykowski | 0fff839 | 2024-06-15 02:43:12 -0700 | [diff] [blame] | 161 | |
| 162 | inline int mapRetrieveExclusiveRW(const char* pathname) { |
| 163 | return bpfLock(mapRetrieveLocklessRW(pathname), F_WRLCK); |
| 164 | } |
| 165 | |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 166 | inline int mapRetrieveRW(const char* pathname) { |
Maciej Żenczykowski | 0fff839 | 2024-06-15 02:43:12 -0700 | [diff] [blame] | 167 | return bpfLock(mapRetrieveLocklessRW(pathname), F_RDLCK); |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 168 | } |
| 169 | |
| 170 | inline int mapRetrieveRO(const char* pathname) { |
Maciej Żenczykowski | dfef229 | 2024-06-04 13:48:36 +0000 | [diff] [blame] | 171 | return bpfFdGet(pathname, BPF_F_RDONLY); |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 172 | } |
| 173 | |
Maciej Żenczykowski | 52018c8 | 2024-06-04 16:05:16 +0000 | [diff] [blame] | 174 | // WARNING: it's impossible to grab a shared (ie. read) lock on a write-only fd, |
Maciej Żenczykowski | 7eb7d67 | 2024-06-14 13:55:09 -0700 | [diff] [blame] | 175 | // so we instead choose to grab an exclusive (ie. write) lock. |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 176 | inline int mapRetrieveWO(const char* pathname) { |
Maciej Żenczykowski | 7eb7d67 | 2024-06-14 13:55:09 -0700 | [diff] [blame] | 177 | return bpfLock(bpfFdGet(pathname, BPF_F_WRONLY), F_WRLCK); |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 178 | } |
| 179 | |
| 180 | inline int retrieveProgram(const char* pathname) { |
| 181 | return bpfFdGet(pathname, BPF_F_RDONLY); |
| 182 | } |
| 183 | |
Maciej Żenczykowski | e950f6d | 2024-04-26 11:52:25 -0700 | [diff] [blame] | 184 | inline bool usableProgram(const char* pathname) { |
| 185 | int fd = retrieveProgram(pathname); |
| 186 | bool ok = (fd >= 0); |
| 187 | if (ok) close(fd); |
| 188 | return ok; |
| 189 | } |
| 190 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 191 | inline int attachProgram(bpf_attach_type type, const borrowed_fd& prog_fd, |
| 192 | const borrowed_fd& cg_fd, uint32_t flags = 0) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 193 | return bpf(BPF_PROG_ATTACH, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 194 | .target_fd = static_cast<__u32>(cg_fd.get()), |
| 195 | .attach_bpf_fd = static_cast<__u32>(prog_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 196 | .attach_type = type, |
KaiWen Zheng | cfe2f2a | 2022-02-08 09:38:50 +0800 | [diff] [blame] | 197 | .attach_flags = flags, |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 198 | }); |
| 199 | } |
| 200 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 201 | inline int detachProgram(bpf_attach_type type, const borrowed_fd& cg_fd) { |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 202 | return bpf(BPF_PROG_DETACH, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 203 | .target_fd = static_cast<__u32>(cg_fd.get()), |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 204 | .attach_type = type, |
| 205 | }); |
| 206 | } |
| 207 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 208 | inline int queryProgram(const borrowed_fd& cg_fd, |
Maciej Żenczykowski | 340e2ff | 2023-10-03 07:25:38 +0000 | [diff] [blame] | 209 | enum bpf_attach_type attach_type, |
| 210 | __u32 query_flags = 0, |
| 211 | __u32 attach_flags = 0) { |
| 212 | int prog_id = -1; // equivalent to an array of one integer. |
| 213 | bpf_attr arg = { |
| 214 | .query = { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 215 | .target_fd = static_cast<__u32>(cg_fd.get()), |
Maciej Żenczykowski | 340e2ff | 2023-10-03 07:25:38 +0000 | [diff] [blame] | 216 | .attach_type = attach_type, |
| 217 | .query_flags = query_flags, |
| 218 | .attach_flags = attach_flags, |
| 219 | .prog_ids = ptr_to_u64(&prog_id), // pointer to output array |
| 220 | .prog_cnt = 1, // in: space - nr of ints in the array, out: used |
| 221 | } |
| 222 | }; |
| 223 | int v = bpf(BPF_PROG_QUERY, &arg); |
| 224 | if (v) return v; // error case |
| 225 | if (!arg.query.prog_cnt) return 0; // no program, kernel never returns zero id |
| 226 | return prog_id; // return actual id |
| 227 | } |
| 228 | |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 229 | inline int detachSingleProgram(bpf_attach_type type, const borrowed_fd& prog_fd, |
| 230 | const borrowed_fd& cg_fd) { |
KaiWen Zheng | cfe2f2a | 2022-02-08 09:38:50 +0800 | [diff] [blame] | 231 | return bpf(BPF_PROG_DETACH, { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 232 | .target_fd = static_cast<__u32>(cg_fd.get()), |
| 233 | .attach_bpf_fd = static_cast<__u32>(prog_fd.get()), |
KaiWen Zheng | cfe2f2a | 2022-02-08 09:38:50 +0800 | [diff] [blame] | 234 | .attach_type = type, |
| 235 | }); |
| 236 | } |
| 237 | |
Ryan Zuklie | 2669e24 | 2022-11-30 11:12:41 -0800 | [diff] [blame] | 238 | // Available in 4.12 and later kernels. |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 239 | inline int runProgram(const borrowed_fd& prog_fd, const void* data, |
Ryan Zuklie | 2669e24 | 2022-11-30 11:12:41 -0800 | [diff] [blame] | 240 | const uint32_t data_size) { |
| 241 | return bpf(BPF_PROG_RUN, { |
| 242 | .test = { |
Patrick Rohr | 99ace23 | 2024-11-22 10:35:29 -0800 | [diff] [blame^] | 243 | .prog_fd = static_cast<__u32>(prog_fd.get()), |
Ryan Zuklie | 2669e24 | 2022-11-30 11:12:41 -0800 | [diff] [blame] | 244 | .data_size_in = data_size, |
Maciej Żenczykowski | 325f675 | 2023-09-06 23:50:47 +0000 | [diff] [blame] | 245 | .data_in = ptr_to_u64(data), |
Ryan Zuklie | 2669e24 | 2022-11-30 11:12:41 -0800 | [diff] [blame] | 246 | }, |
| 247 | }); |
| 248 | } |
| 249 | |
// BPF_OBJ_GET_INFO_BY_FD requires 4.14+ kernel
//
// Note: some fields are only defined in newer kernels (ie. the map_info struct grows
// over time), so we need to check that the field we're interested in is actually
// supported/returned by the running kernel. We do this by checking it is fully
// within the bounds of the struct size as reported by the kernel.
//
// Each expansion defines: inline int bpfGetFd<NAME>(const borrowed_fd& fd),
// returning info.<FIELD> of the bpf_<TYPE>_info for 'fd', or -1 with errno set
// (EOPNOTSUPP if the running kernel's info struct is too short to contain FIELD).
// The kernel writes the size it actually filled in back into attr.info.info_len,
// which is what the offsetof bounds check below inspects after the call.
// (No '//' comments inside the macro itself: they would swallow the '\' continuations.)
#define DEFINE_BPF_GET_FD(TYPE, NAME, FIELD) \
    inline int bpfGetFd ## NAME(const borrowed_fd& fd) { \
        struct bpf_ ## TYPE ## _info info = {}; \
        union bpf_attr attr = { .info = { \
            .bpf_fd = static_cast<__u32>(fd.get()), \
            .info_len = sizeof(info), \
            .info = ptr_to_u64(&info), \
        }}; \
        int rv = bpf(BPF_OBJ_GET_INFO_BY_FD, attr); \
        if (rv) return rv; \
        if (attr.info.info_len < offsetof(bpf_ ## TYPE ## _info, FIELD) + sizeof(info.FIELD)) { \
            errno = EOPNOTSUPP; \
            return -1; \
        }; \
        return info.FIELD; \
    }

// All 7 of these fields are already present in Linux v4.14 (even ACK 4.14-P)
// while BPF_OBJ_GET_INFO_BY_FD is not implemented at all in v4.9 (even ACK 4.9-Q)
DEFINE_BPF_GET_FD(map, MapType, type)            // int bpfGetFdMapType(const borrowed_fd& map_fd)
DEFINE_BPF_GET_FD(map, MapId, id)                // int bpfGetFdMapId(const borrowed_fd& map_fd)
DEFINE_BPF_GET_FD(map, KeySize, key_size)        // int bpfGetFdKeySize(const borrowed_fd& map_fd)
DEFINE_BPF_GET_FD(map, ValueSize, value_size)    // int bpfGetFdValueSize(const borrowed_fd& map_fd)
DEFINE_BPF_GET_FD(map, MaxEntries, max_entries)  // int bpfGetFdMaxEntries(const borrowed_fd& map_fd)
DEFINE_BPF_GET_FD(map, MapFlags, map_flags)      // int bpfGetFdMapFlags(const borrowed_fd& map_fd)
DEFINE_BPF_GET_FD(prog, ProgId, id)              // int bpfGetFdProgId(const borrowed_fd& prog_fd)

// The macro is purely local to this header.
#undef DEFINE_BPF_GET_FD
Maciej Żenczykowski | 5c5fae7 | 2022-05-25 12:58:31 -0700 | [diff] [blame] | 284 | |
Maciej Żenczykowski | a728a70 | 2021-01-11 19:08:33 -0800 | [diff] [blame] | 285 | } // namespace bpf |
| 286 | } // namespace android |
| 287 | |