blob: be3cc8f109a0d657f35903bdf67ba80312702130 [file] [log] [blame]
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef BPF_BPFUTILS_H
18#define BPF_BPFUTILS_H
19
20#include <linux/bpf.h>
21#include <linux/if_ether.h>
Chenbo Feng75b410b2018-10-10 15:01:19 -070022#include <linux/unistd.h>
23#include <net/if.h>
24#include <stdlib.h>
25#include <string.h>
26#include <sys/socket.h>
27
Steven Morelande7cd2a72020-01-10 17:49:35 -080028#include <string>
29
Chenbo Feng75b410b2018-10-10 15:01:19 -070030#include "android-base/unique_fd.h"
Chenbo Feng75b410b2018-10-10 15:01:19 -070031
// Casts any pointer to the uint64_t representation that bpf_attr fields use:
// the bpf(2) ABI passes userspace pointers to the kernel as 64-bit integers.
#define ptr_to_u64(x) ((uint64_t)(uintptr_t)(x))
Chenbo Feng0a1a9a12019-04-09 12:05:04 -070033
Chenbo Feng75b410b2018-10-10 15:01:19 -070034namespace android {
35namespace bpf {
36
// Kernel/device eBPF capability tiers. Enumerators are ordered from least to
// most capable so relational comparisons (e.g. level < BpfLevel::EXTENDED)
// are meaningful — do not reorder.
enum class BpfLevel {
    // Devices shipped before P or kernel version is lower than 4.9 do not
    // have eBPF enabled.
    NONE,
    // Devices shipped in P with android 4.9 kernel only have the basic eBPF
    // functionalities such as xt_bpf and cgroup skb filter.
    BASIC,
    // For devices that have 4.14 kernel. It supports advanced features like
    // map_in_map and cgroup socket filter.
    EXTENDED,
};
48
// Counterset reserved for overflow accounting.
// NOTE(review): meaning inferred from the name — confirm against the users
// of this constant in the .cpp / callers.
constexpr const int OVERFLOW_COUNTERSET = 2;

// Sentinel socket-cookie value meaning "no such socket"; see getSocketCookie().
constexpr const uint64_t NONEXISTENT_COOKIE = 0;

// Android API level (28 == P, per the BpfLevel comments above) from which
// eBPF support is expected on the device.
constexpr const int MINIMUM_API_REQUIRED = 28;
54
/* Note: bpf_attr is a union which might have a much larger size than the anonymous struct portion
 * of it that we are using. The kernel's bpf() system call will perform a strict check to ensure
 * all unused portions are zero. It will fail with E2BIG if we don't fully zero bpf_attr.
 */

// Thin wrapper over the bpf(2) system call: forwards |cmd| and the fully
// zeroed |attr| union (see note above) to the kernel and returns its result.
inline int bpf(int cmd, const bpf_attr& attr) {
    const long rv = syscall(__NR_bpf, cmd, &attr, sizeof(attr));
    return static_cast<int>(rv);
}
63
Maciej Żenczykowski06caf872020-02-11 16:42:21 -080064inline int createMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size,
65 uint32_t max_entries, uint32_t map_flags) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -080066 return bpf(BPF_MAP_CREATE, {
67 .map_type = map_type,
68 .key_size = key_size,
69 .value_size = value_size,
70 .max_entries = max_entries,
71 .map_flags = map_flags,
72 });
73}
74
Maciej Żenczykowski06caf872020-02-11 16:42:21 -080075inline int writeToMapEntry(const base::unique_fd& map_fd, const void* key, const void* value,
76 uint64_t flags) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -080077 return bpf(BPF_MAP_UPDATE_ELEM, {
78 .map_fd = static_cast<__u32>(map_fd.get()),
79 .key = ptr_to_u64(key),
80 .value = ptr_to_u64(value),
81 .flags = flags,
82 });
83}
84
Maciej Żenczykowski06caf872020-02-11 16:42:21 -080085inline int findMapEntry(const base::unique_fd& map_fd, const void* key, void* value) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -080086 return bpf(BPF_MAP_LOOKUP_ELEM, {
87 .map_fd = static_cast<__u32>(map_fd.get()),
88 .key = ptr_to_u64(key),
89 .value = ptr_to_u64(value),
90 });
91}
92
Maciej Żenczykowski06caf872020-02-11 16:42:21 -080093inline int deleteMapEntry(const base::unique_fd& map_fd, const void* key) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -080094 return bpf(BPF_MAP_DELETE_ELEM, {
95 .map_fd = static_cast<__u32>(map_fd.get()),
96 .key = ptr_to_u64(key),
97 });
98}
99
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800100inline int getNextMapKey(const base::unique_fd& map_fd, const void* key, void* next_key) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800101 return bpf(BPF_MAP_GET_NEXT_KEY, {
102 .map_fd = static_cast<__u32>(map_fd.get()),
103 .key = ptr_to_u64(key),
104 .next_key = ptr_to_u64(next_key),
105 });
106}
107
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800108inline int getFirstMapKey(const base::unique_fd& map_fd, void* firstKey) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800109 return getNextMapKey(map_fd, NULL, firstKey);
110}
111
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800112inline int bpfFdPin(const base::unique_fd& map_fd, const char* pathname) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800113 return bpf(BPF_OBJ_PIN, {
114 .pathname = ptr_to_u64(pathname),
115 .bpf_fd = static_cast<__u32>(map_fd.get()),
116 });
117}
118
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800119inline int bpfFdGet(const char* pathname, uint32_t flag) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800120 return bpf(BPF_OBJ_GET, {
121 .pathname = ptr_to_u64(pathname),
122 .file_flags = flag,
123 });
124}
125
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800126inline int mapRetrieve(const char* pathname, uint32_t flag) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800127 return bpfFdGet(pathname, flag);
128}
129
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800130inline int attachProgram(bpf_attach_type type, const base::unique_fd& prog_fd,
131 const base::unique_fd& cg_fd) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800132 return bpf(BPF_PROG_ATTACH, {
Maciej Żenczykowski289742f2020-01-17 19:18:26 -0800133 .target_fd = static_cast<__u32>(cg_fd.get()),
134 .attach_bpf_fd = static_cast<__u32>(prog_fd.get()),
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800135 .attach_type = type,
136 });
137}
138
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800139inline int detachProgram(bpf_attach_type type, const base::unique_fd& cg_fd) {
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800140 return bpf(BPF_PROG_DETACH, {
Maciej Żenczykowski289742f2020-01-17 19:18:26 -0800141 .target_fd = static_cast<__u32>(cg_fd.get()),
Maciej Żenczykowskib479fd62020-01-16 02:18:11 -0800142 .attach_type = type,
143 });
144}
145
Chenbo Feng75b410b2018-10-10 15:01:19 -0700146uint64_t getSocketCookie(int sockFd);
Maciej Żenczykowskic3a640d2020-02-11 15:01:21 -0800147int synchronizeKernelRCU();
Chenbo Feng0a1a9a12019-04-09 12:05:04 -0700148int setrlimitForTest();
Chenbo Feng79b7e612018-12-11 12:24:23 -0800149std::string BpfLevelToString(BpfLevel BpfLevel);
150BpfLevel getBpfSupportLevel();
Maciej Żenczykowskic3a640d2020-02-11 15:01:21 -0800151
Maciej Żenczykowski06caf872020-02-11 16:42:21 -0800152inline bool isBpfSupported() {
Maciej Żenczykowskic3a640d2020-02-11 15:01:21 -0800153 return getBpfSupportLevel() != BpfLevel::NONE;
154}
Chenbo Feng75b410b2018-10-10 15:01:19 -0700155
// Returns from (i.e. skips) the current gtest test body, with an INFO log,
// when the device has no eBPF support at all (BpfLevel::NONE).
#define SKIP_IF_BPF_NOT_SUPPORTED                                                    \
    do {                                                                             \
        if (!android::bpf::isBpfSupported()) {                                       \
            GTEST_LOG_(INFO) << "This test is skipped since bpf is not available\n"; \
            return;                                                                  \
        }                                                                            \
    } while (0)
163
// Silently skips the current gtest test body on devices that DO have eBPF
// support (for tests that exercise the non-bpf fallback paths).
#define SKIP_IF_BPF_SUPPORTED                       \
    do {                                            \
        if (android::bpf::isBpfSupported()) return; \
    } while (0)
168
// Skips the current gtest test body, with an INFO log, unless the kernel
// supports the EXTENDED eBPF tier (4.14+ features such as map_in_map).
// Fixed: the adjacent streamed literals previously rendered as
// "featurenot supported" — a space was missing at the end of the first one.
#define SKIP_IF_EXTENDED_BPF_NOT_SUPPORTED                                           \
    do {                                                                             \
        if (android::bpf::getBpfSupportLevel() < android::bpf::BpfLevel::EXTENDED) { \
            GTEST_LOG_(INFO) << "This test is skipped since extended bpf feature "   \
                             << "not supported\n";                                   \
            return;                                                                  \
        }                                                                            \
    } while (0)
177
Chenbo Feng75b410b2018-10-10 15:01:19 -0700178} // namespace bpf
179} // namespace android
180
181#endif