/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This header file, together with bpf_kern.c, is used to compile the eBPF
 * kernel program. To generate the bpf_kern.o file manually, use the prebuilt
 * clang in this Android tree to compile the files with the --target=bpf
 * option. For example, in the system/netd/ directory, execute the following
 * command:
 * $: ANDROID_BASE_DIRECTORY/prebuilts/clang/host/linux-x86/clang-4691093/bin/clang \
 *    -I ANDROID_BASE_DIRECTORY/bionic/libc/kernel/uapi/ \
 *    -I ANDROID_BASE_DIRECTORY/system/netd/bpfloader/ \
 *    -I ANDROID_BASE_DIRECTORY/bionic/libc/kernel/android/uapi/ \
 *    -I ANDROID_BASE_DIRECTORY/bionic/libc/include \
 *    -I ANDROID_BASE_DIRECTORY/system/netd/libbpf/include \
 *    --target=bpf -O2 -c bpfloader/bpf_kern.c -o bpfloader/bpf_kern.o
 */
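
/*
 * The resulting bpf_kern.o is normally produced by the regular Android build
 * and loaded into the kernel by the bpfloader; the manual command above is
 * mainly useful for inspecting or debugging the generated object.
 */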

#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf/bpf_shared.h"

#define ELF_SEC(NAME) __attribute__((section(NAME), used))
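
/*
 * The section() attribute is what lets the loader find each program and map
 * in the compiled bpf_kern.o by section name, and `used` stops the compiler
 * from discarding symbols that nothing in this translation unit references.
 */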

struct uid_tag {
    uint32_t uid;
    uint32_t tag;
};

struct stats_key {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;
};

struct stats_value {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
};

/* helper functions called from eBPF programs written in C */
static void* (*find_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem;
static int (*write_to_map_entry)(uint64_t map, void* key, void* value,
                                 uint64_t flags) = (void*)BPF_FUNC_map_update_elem;
static int (*delete_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_delete_elem;
static uint64_t (*get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static uint32_t (*get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;
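
/*
 * Each pointer above is initialized with a BPF_FUNC_* helper ID rather than
 * a real address: clang emits a call against that ID, and the kernel
 * resolves it to the matching in-kernel helper at load time. For example,
 * find_map_entry(map, &key) ends up invoking the kernel's
 * bpf_map_lookup_elem() helper.
 */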

// These return values are for the cgroup bpf filter only.
#define BPF_PASS 1
#define BPF_DROP 0

// These return values are for the xt_bpf program only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

// Offsets of the protocol fields within the IPv4 and IPv6 headers.
#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
// The version/IHL byte sits at the start of the IPv4 header; TCP_FLAG_OFF is
// the offset of the flags byte within the TCP header, and RST_OFFSET is the
// bit position of the RST flag within that byte.
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2
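
/*
 * Worked example of the offset math used below: for these skb programs,
 * bpf_skb_load_bytes() offset 0 is the start of the IP header. With a
 * minimal IPv4 header (ihl = 5, i.e. 20 bytes), the TCP flags byte is at
 * ihl * 4 + TCP_FLAG_OFF = 33, and the RST flag is (flags >> RST_OFFSET) & 1.
 */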

static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

static __always_inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map, int direction,
                                             void* key) {
    struct stats_value* value;
    value = find_map_entry(map, key);
    if (!value) {
        // No entry yet for this key: create a zeroed one, then look it up
        // again. BPF_NOEXIST makes the update a no-op if another CPU won the
        // race to create the entry.
        struct stats_value newValue = {};
        write_to_map_entry(map, key, &newValue, BPF_NOEXIST);
        value = find_map_entry(map, key);
    }
    if (value) {
        if (direction == BPF_EGRESS) {
            __sync_fetch_and_add(&value->txPackets, 1);
            __sync_fetch_and_add(&value->txBytes, skb->len);
        } else if (direction == BPF_INGRESS) {
            __sync_fetch_and_add(&value->rxPackets, 1);
            __sync_fetch_and_add(&value->rxBytes, skb->len);
        }
    }
}

/*
 * Returns true if the owner-match check should be skipped for this packet:
 * ESP (IPsec) traffic and TCP packets with the RST flag set bypass the
 * per-uid firewall rules.
 */
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == ETH_P_IP) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl, flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                // The low nibble of the first IPv4 header byte is the header
                // length in 32-bit words; use it to locate the TCP flags byte.
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == ETH_P_IPV6) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                // A fixed 40-byte IPv6 header is assumed here; extension
                // headers are not walked.
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig() {
    uint32_t mapSettingKey = CONFIGURATION_KEY;
    BpfConfig* config = find_map_entry(CONFIGURATION_MAP, &mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig();
    if (!enabledRules) {
        return BPF_PASS;
    }

    uint8_t* uidEntry = find_map_entry(UID_OWNER_MAP, &uid);
    uint8_t uidRules = uidEntry ? *uidEntry : 0;
    // DOZABLE and POWERSAVE are allowlist chains: when enabled, traffic is
    // dropped unless the uid is explicitly allowed. STANDBY is a denylist
    // chain: when enabled, traffic is dropped if the uid is listed in it.
    if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
        return BPF_DROP;
    }
    if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
        return BPF_DROP;
    }
    if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
        return BPF_DROP;
    }
    return BPF_PASS;
}

static __always_inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = get_socket_uid(skb);
    int match = bpf_owner_match(skb, sock_uid);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = get_socket_cookie(skb);
    struct uid_tag* utag = find_map_entry(COOKIE_TAG_MAP, &cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = find_map_entry(UID_COUNTERSET_MAP, &uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    // Tagged traffic is recorded both under the (uid, tag) entry and under
    // the plain (uid, 0) entry; APP_UID_STATS_MAP is keyed by uid alone.
    if (tag) {
        bpf_update_stats(skb, TAG_STATS_MAP, direction, &key);
    }

    key.tag = 0;
    bpf_update_stats(skb, UID_STATS_MAP, direction, &key);
    bpf_update_stats(skb, APP_UID_STATS_MAP, direction, &uid);
    return match;
}
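
/*
 * A minimal sketch of how bpf_kern.c might wire this up (the section and
 * function names below are illustrative assumptions, not necessarily those
 * used by bpf_kern.c):
 *
 *   ELF_SEC("cgroupskb/ingress")
 *   int bpf_cgroup_ingress(struct __sk_buff* skb) {
 *       return bpf_traffic_account(skb, BPF_INGRESS);
 *   }
 *
 *   ELF_SEC("cgroupskb/egress")
 *   int bpf_cgroup_egress(struct __sk_buff* skb) {
 *       return bpf_traffic_account(skb, BPF_EGRESS);
 *   }
 *
 * A cgroup skb filter returns BPF_PASS (1) to let the packet through or
 * BPF_DROP (0) to drop it, which is exactly what bpf_traffic_account returns.
 */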