/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This header file, together with bpf_kern.c, is used to build the eBPF
 * kernel program.
 */

#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <stdbool.h>
#include <stddef.h>  // for offsetof(), used by the header-offset macros below
#include <stdint.h>
#include "netdbpf/bpf_shared.h"

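// Place a definition in its own named ELF section and mark it "used" so the
// compiler keeps it; the loader locates programs and maps in the compiled
// object by section name.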
#define ELF_SEC(NAME) __attribute__((section(NAME), used))

// Value type of the socket cookie to tag map: the owning uid and the
// network tag set on the socket.
struct uid_tag {
    uint32_t uid;
    uint32_t tag;
};

// Key type of the detailed stats maps.
struct stats_key {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;
};

// Packet and byte counters for one stats entry, updated atomically.
struct stats_value {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
};

/* helper functions called from eBPF programs written in C */
static void* (*find_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem;
static int (*write_to_map_entry)(uint64_t map, void* key, void* value,
                                 uint64_t flags) = (void*)BPF_FUNC_map_update_elem;
static int (*delete_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_delete_elem;
static uint64_t (*get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static uint32_t (*get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;
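
// Each pointer above holds the integer ID of a kernel BPF helper (a
// BPF_FUNC_* value from <linux/bpf.h>); the compiler turns calls through
// these pointers into BPF call instructions with that helper ID, which the
// kernel resolves at load time. The uint64_t map arguments are placeholder
// handles that the bpf loader wires up to the actual maps before loading.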

// Return values of the cgroup bpf filter programs only: they decide whether
// the packet is allowed through.
#define BPF_PASS 1
#define BPF_DROP 0

// Return values of the xt_bpf programs only: they tell the xt_bpf netfilter
// match whether the packet matched.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

// Traffic direction, as passed to bpf_traffic_account().
#define BPF_EGRESS 0
#define BPF_INGRESS 1

// Offsets of the L4 protocol field within the IPv4/IPv6 header.
#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
// The version/IHL byte sits at the start of the IPv4 header.
#define IPPROTO_IHL_OFF 0
// Offset of the flags byte within the TCP header, and the bit position of
// the RST flag inside that byte.
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2

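// skb->protocol is exposed in network byte order, so the ETH_P_* constants
// must be byte-swapped before comparison. The uapi headers included above do
// not provide htons(), so a minimal local equivalent is defined here; it
// assumes a little-endian target, which holds for current Android BPF targets.
#define bpf_htons(x) __builtin_bswap16(x)
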
static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

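// Look up the stats entry for |key| in |map|, creating a zeroed entry if
// none exists yet, then atomically add this packet's count and length to
// the counters for the given direction.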
static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map,
                                                    int direction, void* key) {
    struct stats_value* value;
    value = find_map_entry(map, key);
    if (!value) {
        struct stats_value newValue = {};
        // BPF_NOEXIST makes concurrent creation of the same entry harmless:
        // the losing update fails and the following lookup finds the winner.
        write_to_map_entry(map, key, &newValue, BPF_NOEXIST);
        value = find_map_entry(map, key);
    }
    if (value) {
        if (direction == BPF_EGRESS) {
            __sync_fetch_and_add(&value->txPackets, 1);
            __sync_fetch_and_add(&value->txBytes, skb->len);
        } else if (direction == BPF_INGRESS) {
            __sync_fetch_and_add(&value->rxPackets, 1);
            __sync_fetch_and_add(&value->rxBytes, skb->len);
        }
    }
}

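// Returns true for packets that should skip the per-uid owner check: ESP
// traffic and TCP RSTs, which may not be attributable to an application
// socket.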
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    // skb->protocol is in network byte order, hence the bpf_htons().
    if (skb->protocol == bpf_htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        // Only a single byte is ever loaded into it, so uint8_t suffices.
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                if (ret) return false;
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == bpf_htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

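// Fetch the global firewall configuration (the bitmask of enabled match
// rules); if the entry cannot be read, treat all rules as disabled.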
static __always_inline BpfConfig getConfig() {
    uint32_t mapSettingKey = CONFIGURATION_KEY;
    BpfConfig* config = find_map_entry(CONFIGURATION_MAP, &mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

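// Apply the per-uid firewall chains: DOZABLE and POWERSAVE act as
// allowlists (drop unless the uid is flagged), STANDBY as a denylist (drop
// when the uid is flagged). System uids and packets accepted by
// skip_owner_match() always pass.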
static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig();
    if (!enabledRules) {
        return BPF_PASS;
    }

    uint8_t* uidEntry = find_map_entry(UID_OWNER_MAP, &uid);
    uint8_t uidRules = uidEntry ? *uidEntry : 0;
    if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
        return BPF_DROP;
    }
    if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
        return BPF_DROP;
    }
    if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
        return BPF_DROP;
    }
    return BPF_PASS;
}

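// Accounting entry point for the cgroup skb programs: enforce the owner
// match, then attribute the packet to the tag, uid and app-uid stats maps.
// Returns BPF_PASS or BPF_DROP.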
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = get_socket_uid(skb);
    int match = bpf_owner_match(skb, sock_uid);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = get_socket_cookie(skb);
    struct uid_tag* utag = find_map_entry(COOKIE_TAG_MAP, &cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = find_map_entry(UID_COUNTERSET_MAP, &uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    if (tag) {
        bpf_update_stats(skb, TAG_STATS_MAP, direction, &key);
    }

    key.tag = 0;
    bpf_update_stats(skb, UID_STATS_MAP, direction, &key);
    bpf_update_stats(skb, APP_UID_STATS_MAP, direction, &uid);
    return match;
}
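
/*
 * For illustration only: bpf_kern.c is expected to define the actual
 * programs on top of this header, roughly like the sketch below. The
 * section and function names here are assumptions made for this example,
 * not definitions from this header.
 *
 *   ELF_SEC("cgroupskb/ingress/stats")
 *   int bpf_cgroup_ingress(struct __sk_buff* skb) {
 *       return bpf_traffic_account(skb, BPF_INGRESS);
 *   }
 *
 *   ELF_SEC("cgroupskb/egress/stats")
 *   int bpf_cgroup_egress(struct __sk_buff* skb) {
 *       return bpf_traffic_account(skb, BPF_EGRESS);
 *   }
 */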
211}