/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef BPF_BPFUTILS_H
#define BPF_BPFUTILS_H

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/unistd.h>
#include <net/if.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

#include <string>

#include "android-base/unique_fd.h"

#define ptr_to_u64(x) ((uint64_t)(uintptr_t)(x))

namespace android {
namespace bpf {

enum class BpfLevel {
    // Devices shipped before P, or with a kernel older than 4.9, do not
    // have eBPF enabled.
    NONE,
    // Devices shipped in P with an android 4.9 kernel only have basic eBPF
    // functionality, such as xt_bpf and the cgroup skb filter.
    BASIC_4_9,
    // Devices with a 4.14 or newer kernel support advanced features such as
    // map_in_map and the cgroup socket filter.
    EXTENDED_4_14,
    EXTENDED_4_19,
    EXTENDED_5_4,
};

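// Usage sketch (illustrative, not part of this header): callers can gate newer
// eBPF features on the reported support level; for example, map-in-map maps
// are only usable from EXTENDED_4_14 onwards:
//
//   if (getBpfSupportLevel() >= BpfLevel::EXTENDED_4_14) {
//       // Safe to create e.g. BPF_MAP_TYPE_ARRAY_OF_MAPS here.
//   }
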
constexpr const int OVERFLOW_COUNTERSET = 2;

constexpr const uint64_t NONEXISTENT_COOKIE = 0;

constexpr const int MINIMUM_API_REQUIRED = 28;

/* Note: bpf_attr is a union which might have a much larger size than the anonymous struct portion
 * of it that we are using. The kernel's bpf() system call will perform a strict check to ensure
 * all unused portions are zero. It will fail with E2BIG if we don't fully zero bpf_attr.
 */

inline int bpf(int cmd, const bpf_attr& attr) {
    return syscall(__NR_bpf, cmd, &attr, sizeof(attr));
}

inline int createMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size,
                     uint32_t max_entries, uint32_t map_flags) {
    return bpf(BPF_MAP_CREATE, {
                                   .map_type = map_type,
                                   .key_size = key_size,
                                   .value_size = value_size,
                                   .max_entries = max_entries,
                                   .map_flags = map_flags,
                               });
}

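// Usage sketch (variable names are hypothetical): create a uint32_t -> uint64_t
// hash map. On success the raw map fd is returned (and is typically wrapped in
// a base::unique_fd right away); on failure -1 is returned with errno set.
//
//   base::unique_fd mapFd(createMap(BPF_MAP_TYPE_HASH, sizeof(uint32_t),
//                                   sizeof(uint64_t), /*max_entries=*/100,
//                                   /*map_flags=*/0));
//   if (mapFd.get() < 0) { /* handle errno */ }
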
inline int writeToMapEntry(const base::unique_fd& map_fd, const void* key, const void* value,
                           uint64_t flags) {
    return bpf(BPF_MAP_UPDATE_ELEM, {
                                        .map_fd = static_cast<__u32>(map_fd.get()),
                                        .key = ptr_to_u64(key),
                                        .value = ptr_to_u64(value),
                                        .flags = flags,
                                    });
}

inline int findMapEntry(const base::unique_fd& map_fd, const void* key, void* value) {
    return bpf(BPF_MAP_LOOKUP_ELEM, {
                                        .map_fd = static_cast<__u32>(map_fd.get()),
                                        .key = ptr_to_u64(key),
                                        .value = ptr_to_u64(value),
                                    });
}

inline int deleteMapEntry(const base::unique_fd& map_fd, const void* key) {
    return bpf(BPF_MAP_DELETE_ELEM, {
                                        .map_fd = static_cast<__u32>(map_fd.get()),
                                        .key = ptr_to_u64(key),
                                    });
}

inline int getNextMapKey(const base::unique_fd& map_fd, const void* key, void* next_key) {
    return bpf(BPF_MAP_GET_NEXT_KEY, {
                                         .map_fd = static_cast<__u32>(map_fd.get()),
                                         .key = ptr_to_u64(key),
                                         .next_key = ptr_to_u64(next_key),
                                     });
}

inline int getFirstMapKey(const base::unique_fd& map_fd, void* firstKey) {
    return getNextMapKey(map_fd, NULL, firstKey);
}

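// Iteration sketch (hypothetical; assumes uint32_t keys): seed with
// getFirstMapKey() and advance with getNextMapKey(). Reusing one buffer for
// key and next_key is safe because the kernel reads the old key before it
// writes the next one.
//
//   uint32_t key;
//   if (getFirstMapKey(mapFd, &key) == 0) {
//       do {
//           // ... process `key`, e.g. findMapEntry(mapFd, &key, &value) ...
//       } while (getNextMapKey(mapFd, &key, &key) == 0);
//   }
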
inline int bpfFdPin(const base::unique_fd& map_fd, const char* pathname) {
    return bpf(BPF_OBJ_PIN, {
                                .pathname = ptr_to_u64(pathname),
                                .bpf_fd = static_cast<__u32>(map_fd.get()),
                            });
}

inline int bpfFdGet(const char* pathname, uint32_t flag) {
    return bpf(BPF_OBJ_GET, {
                                .pathname = ptr_to_u64(pathname),
                                .file_flags = flag,
                            });
}

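// Pinning sketch (the path below is hypothetical): pinning attaches an object
// to a bpffs inode so it outlives the creating process; any process with
// access to the path can then reopen it.
//
//   bpfFdPin(mapFd, "/sys/fs/bpf/map_example");
//   base::unique_fd fd(mapRetrieveRO("/sys/fs/bpf/map_example"));
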
inline int mapRetrieve(const char* pathname, uint32_t flag) {
    return bpfFdGet(pathname, flag);
}

inline int mapRetrieveRW(const char* pathname) {
    return mapRetrieve(pathname, 0);
}

inline int mapRetrieveRO(const char* pathname) {
    return mapRetrieve(pathname, BPF_F_RDONLY);
}

inline int mapRetrieveWO(const char* pathname) {
    return mapRetrieve(pathname, BPF_F_WRONLY);
}

inline int retrieveProgram(const char* pathname) {
    return bpfFdGet(pathname, BPF_F_RDONLY);
}

inline int attachProgram(bpf_attach_type type, const base::unique_fd& prog_fd,
                         const base::unique_fd& cg_fd) {
    return bpf(BPF_PROG_ATTACH, {
                                    .target_fd = static_cast<__u32>(cg_fd.get()),
                                    .attach_bpf_fd = static_cast<__u32>(prog_fd.get()),
                                    .attach_type = type,
                                });
}

inline int detachProgram(bpf_attach_type type, const base::unique_fd& cg_fd) {
    return bpf(BPF_PROG_DETACH, {
                                    .target_fd = static_cast<__u32>(cg_fd.get()),
                                    .attach_type = type,
                                });
}

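// Attach sketch (both paths are hypothetical): attach a pinned cgroup skb
// program to a cgroup directory fd; the attachment persists after both fds
// are closed.
//
//   base::unique_fd cgFd(open("/dev/cg2_bpf", O_DIRECTORY | O_RDONLY));
//   base::unique_fd progFd(retrieveProgram("/sys/fs/bpf/prog_example"));
//   if (attachProgram(BPF_CGROUP_INET_EGRESS, progFd, cgFd)) { /* handle errno */ }
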
uint64_t getSocketCookie(int sockFd);
int synchronizeKernelRCU();
int setrlimitForTest();
unsigned kernelVersion();
std::string BpfLevelToString(BpfLevel bpfLevel);
BpfLevel getBpfSupportLevel();

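// Usage sketch: the kernel assigns every socket a unique 64-bit cookie that
// eBPF programs can also read, making it a stable per-socket map key;
// getSocketCookie() is expected to return NONEXISTENT_COOKIE (0) on failure.
//
//   int sock = socket(AF_INET, SOCK_STREAM, 0);
//   uint64_t cookie = getSocketCookie(sock);
//   if (cookie != NONEXISTENT_COOKIE) { /* use cookie as a map key */ }
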
inline bool isBpfSupported() {
    return getBpfSupportLevel() != BpfLevel::NONE;
}

#define SKIP_IF_BPF_NOT_SUPPORTED                                                    \
    do {                                                                             \
        if (!android::bpf::isBpfSupported()) {                                       \
            GTEST_LOG_(INFO) << "This test is skipped since bpf is not available\n"; \
            return;                                                                  \
        }                                                                            \
    } while (0)

#define SKIP_IF_BPF_SUPPORTED                           \
    do {                                                \
        if (android::bpf::isBpfSupported()) return;     \
    } while (0)

#define SKIP_IF_EXTENDED_BPF_NOT_SUPPORTED                                                 \
    do {                                                                                   \
        if (android::bpf::getBpfSupportLevel() < android::bpf::BpfLevel::EXTENDED_4_14) { \
            GTEST_LOG_(INFO) << "This test is skipped since extended bpf features are "   \
                             << "not supported\n";                                         \
            return;                                                                       \
        }                                                                                 \
    } while (0)

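// Usage sketch (hypothetical test): these macros expand to an early return,
// so they should be the first statement in a gtest TEST body:
//
//   TEST(BpfMapTest, CreateMap) {
//       SKIP_IF_BPF_NOT_SUPPORTED;
//       // ... test body ...
//   }
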
}  // namespace bpf
}  // namespace android

#endif