Merge "Replace clang-format with symlink."
diff --git a/bpfloader/Android.bp b/bpfloader/Android.bp
index 81cdbe1..261a824 100644
--- a/bpfloader/Android.bp
+++ b/bpfloader/Android.bp
@@ -47,8 +47,7 @@
required: [
"netd.o",
-// Uncomment once security related patches ready
-// "time_in_state.o",
+ "time_in_state.o",
],
}
diff --git a/libbpf_android/BpfUtils.cpp b/libbpf_android/BpfUtils.cpp
index 9f482d6..f5741bb 100644
--- a/libbpf_android/BpfUtils.cpp
+++ b/libbpf_android/BpfUtils.cpp
@@ -23,6 +23,7 @@
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
+#include <linux/pfkeyv2.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
@@ -43,7 +44,9 @@
using android::netdutils::MemBlock;
using android::netdutils::Slice;
-constexpr size_t LOG_BUF_SIZE = 65536;
+// Size of the buffer used to capture the program-loading (verifier) log; it
+// needs to be large enough to hold the log of the largest eBPF program.
+constexpr size_t LOG_BUF_SIZE = 0x20000;
namespace android {
namespace bpf {
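
For context, this constant sizes the log buffer that the BPF_PROG_LOAD command of the
bpf(2) syscall fills with verifier output. A minimal sketch of how such a buffer is
typically wired into the load attributes (illustrative only, not the actual
Loader.cpp code):

    #include <linux/bpf.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Illustrative sketch: hand a LOG_BUF_SIZE buffer to BPF_PROG_LOAD so the
    // verifier log of a large program is not truncated.
    static int loadProgram(enum bpf_prog_type type, const struct bpf_insn* insns,
                           uint32_t insnCount, char* logBuf, uint32_t logBufSize) {
        union bpf_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = type;
        attr.insns = (uint64_t)(uintptr_t)insns;
        attr.insn_cnt = insnCount;
        attr.license = (uint64_t)(uintptr_t)"Apache 2.0";
        attr.log_buf = (uint64_t)(uintptr_t)logBuf;  // verifier output lands here
        attr.log_size = logBufSize;                  // e.g. LOG_BUF_SIZE
        attr.log_level = 1;                          // non-zero enables the log
        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
    }
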
@@ -212,6 +215,27 @@
return sock_cookie;
}
+int synchronizeKernelRCU() {
+    // This is a temporary hack for the network stats map swap on devices running
+    // 4.9 kernels. On socket release, the kernel's PF_KEY code explicitly calls
+    // synchronize_rcu(), which is exactly what we need here.
+ int pfSocket = socket(AF_KEY, SOCK_RAW | SOCK_CLOEXEC, PF_KEY_V2);
+
+ if (pfSocket < 0) {
+ int ret = -errno;
+        ALOGE("failed to create PF_KEY socket: %s", strerror(errno));
+ return ret;
+ }
+
+    // Closing the socket triggers synchronize_rcu() inside the kernel's sock_release().
+ if (close(pfSocket)) {
+ int ret = -errno;
+ ALOGE("failed to close the PF_KEY socket: %s", strerror(errno));
+ return ret;
+ }
+ return 0;
+}
+
bool hasBpfSupport() {
struct utsname buf;
int kernel_version_major;
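
The expected caller of synchronizeKernelRCU() is the stats-map swap path: flip the
live-map selector in the configuration map first, then wait out any in-flight program
invocations before touching the now-inactive map. A hedged sketch of that call site
(swapActiveStatsMap is a hypothetical name and the helper signatures follow the
BpfUtils style; only synchronizeKernelRCU comes from this change):

    // Hypothetical netd-side swap trigger.
    int swapActiveStatsMap(const base::unique_fd& configurationMap, uint8_t newSelection) {
        uint32_t key = CURRENT_STATS_MAP_CONFIGURATION_KEY;
        // 1. Repoint the eBPF programs at the other stats map.
        if (writeToMapEntry(configurationMap, &key, &newSelection, BPF_ANY)) return -errno;
        // 2. Once synchronize_rcu() returns, every program run that could still
        //    have seen the old selection has finished; the inactive map is
        //    quiescent and safe for userspace to read and clear.
        return synchronizeKernelRCU();
    }
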
diff --git a/libbpf_android/Loader.cpp b/libbpf_android/Loader.cpp
index 332c0b4..d8771c1 100644
--- a/libbpf_android/Loader.cpp
+++ b/libbpf_android/Loader.cpp
@@ -67,6 +67,7 @@
{ "tracepoint", BPF_PROG_TYPE_TRACEPOINT },
{ "skfilter", BPF_PROG_TYPE_SOCKET_FILTER },
{ "cgroupskb", BPF_PROG_TYPE_CGROUP_SKB },
+ { "schedcls", BPF_PROG_TYPE_SCHED_CLS },
/* End of table */
{ "END", BPF_PROG_TYPE_UNSPEC },
diff --git a/libbpf_android/include/bpf/BpfUtils.h b/libbpf_android/include/bpf/BpfUtils.h
index 6bb1f0f..426dc02 100644
--- a/libbpf_android/include/bpf/BpfUtils.h
+++ b/libbpf_android/include/bpf/BpfUtils.h
@@ -150,6 +150,7 @@
bool hasBpfSupport();
int parseProgramsFromFile(const char* path, BpfProgInfo* programs, size_t size,
const std::vector<BpfMapInfo>& mapPatterns);
+int synchronizeKernelRCU();
#define SKIP_IF_BPF_NOT_SUPPORTED \
do { \
diff --git a/progs/netd.h b/progs/netd.h
index 6b2d103..ff97753 100644
--- a/progs/netd.h
+++ b/progs/netd.h
@@ -110,21 +110,21 @@
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(uint32_t),
.value_size = sizeof(struct stats_value),
- .max_entries = UID_STATS_MAP_SIZE,
+ .max_entries = APP_STATS_MAP_SIZE,
};
-struct bpf_map_def SEC("maps") uid_stats_map = {
+struct bpf_map_def SEC("maps") stats_map_A = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct stats_key),
.value_size = sizeof(struct stats_value),
- .max_entries = UID_STATS_MAP_SIZE,
+ .max_entries = STATS_MAP_SIZE,
};
-struct bpf_map_def SEC("maps") tag_stats_map = {
+struct bpf_map_def SEC("maps") stats_map_B = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct stats_key),
.value_size = sizeof(struct stats_value),
- .max_entries = TAG_STATS_MAP_SIZE,
+ .max_entries = STATS_MAP_SIZE,
};
struct bpf_map_def SEC("maps") iface_stats_map = {
@@ -218,8 +218,8 @@
return false;
}
-static __always_inline BpfConfig getConfig() {
- uint32_t mapSettingKey = CONFIGURATION_KEY;
+static __always_inline BpfConfig getConfig(uint32_t configKey) {
+ uint32_t mapSettingKey = configKey;
BpfConfig* config = find_map_entry(&configuration_map, &mapSettingKey);
if (!config) {
// Couldn't read configuration entry. Assume everything is disabled.
@@ -233,7 +233,7 @@
if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;
- BpfConfig enabledRules = getConfig();
+ BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);
if (!enabledRules) {
return BPF_PASS;
}
@@ -252,6 +252,15 @@
return BPF_PASS;
}
+static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
+ void* key, uint8_t selectedMap) {
+ if (selectedMap == SELECT_MAP_A) {
+ bpf_update_stats(skb, &stats_map_A, direction, key);
+ } else if (selectedMap == SELECT_MAP_B) {
+ bpf_update_stats(skb, &stats_map_B, direction, key);
+ }
+}
+
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
uint32_t sock_uid = get_socket_uid(skb);
int match = bpf_owner_match(skb, sock_uid);
@@ -277,12 +286,18 @@
uint8_t* counterSet = find_map_entry(&uid_counterset_map, &uid);
if (counterSet) key.counterSet = (uint32_t)*counterSet;
+ uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
+ uint8_t* selectedMap = find_map_entry(&configuration_map, &mapSettingKey);
+ if (!selectedMap) {
+ return match;
+ }
+
if (tag) {
- bpf_update_stats(skb, &tag_stats_map, direction, &key);
+ update_stats_with_config(skb, direction, &key, *selectedMap);
}
key.tag = 0;
- bpf_update_stats(skb, &uid_stats_map, direction, &key);
+ update_stats_with_config(skb, direction, &key, *selectedMap);
bpf_update_stats(skb, &app_uid_stats_map, direction, &uid);
return match;
}
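
Once the selector has been flipped and synchronizeKernelRCU() has returned, userspace
can drain the inactive map without racing the programs above. A hedged sketch of that
drain loop (drainInactiveStatsMap is a hypothetical name and the helper signatures
follow the BpfUtils style):

    int drainInactiveStatsMap(const base::unique_fd& inactiveMap) {
        struct stats_key lookupKey, firstKey;
        // getNextMapKey on a key that is absent from a hash map yields the
        // first real entry, so deleting from the front drains the whole map.
        memset(&lookupKey, 0xff, sizeof(lookupKey));
        while (getNextMapKey(inactiveMap, &lookupKey, &firstKey) == 0) {
            struct stats_value value;
            if (findMapEntry(inactiveMap, &firstKey, &value) == 0) {
                // ... fold `value` into the userspace stats snapshot ...
            }
            deleteMapEntry(inactiveMap, &firstKey);
        }
        return 0;
    }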