bpf: move bpf_headers into /bpf subdirectory

Generated via:
  git mv staticlibs/native/bpf_headers bpf/headers
  cd bpf/headers/include
  git mv bpf/bpf_helpers.h .
  git mv bpf/bpf_map_def.h .
manually fix up bpf/bpf_map_def.h include path

bpf_helpers.h is only used by bpf programs so the 'bpf/'
prefix is spurious.

bpf_map_def.h is only used by bpf programs and the two
bpfloaders, which effectively means the 'bpf/' prefix isn't
useful.
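
For example, sources that previously did:
  #include <bpf/bpf_helpers.h>
now simply do:
  #include <bpf_helpers.h>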

Test: TreeHugger
Signed-off-by: Maciej Żenczykowski <maze@google.com>
Change-Id: Ic3fc4e354a15821c27c23b7dbc019daad9f6ad85
diff --git a/bpf/headers/Android.bp b/bpf/headers/Android.bp
new file mode 100644
index 0000000..d55584a
--- /dev/null
+++ b/bpf/headers/Android.bp
@@ -0,0 +1,66 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_team: "trendy_team_fwk_core_networking",
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_library_headers {
+    name: "bpf_headers",
+    vendor_available: true,
+    recovery_available: true,
+    host_supported: true,
+    native_bridge_supported: true,
+    header_libs: ["bpf_syscall_wrappers"],
+    export_header_lib_headers: ["bpf_syscall_wrappers"],
+    export_include_dirs: ["include"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+    sdk_version: "30",
+    min_sdk_version: "30",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.debug",
+        "com.android.os.statsd",
+        "com.android.resolv",
+        "com.android.tethering",
+    ],
+}
+
+cc_test {
+    // TODO: Rename to bpf_map_test and modify .gcls as well.
+    name: "libbpf_android_test",
+    srcs: [
+        "BpfMapTest.cpp",
+        "BpfRingbufTest.cpp",
+    ],
+    defaults: ["bpf_defaults"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wno-error=unused-variable",
+    ],
+    header_libs: ["bpf_headers"],
+    static_libs: ["libgmock"],
+    shared_libs: [
+        "libbase",
+        "liblog",
+        "libutils",
+    ],
+    require_root: true,
+    test_suites: ["general-tests"],
+}
diff --git a/bpf/headers/BpfMapTest.cpp b/bpf/headers/BpfMapTest.cpp
new file mode 100644
index 0000000..862114d
--- /dev/null
+++ b/bpf/headers/BpfMapTest.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <gtest/gtest.h>
+
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+
+#define BPF_MAP_MAKE_VISIBLE_FOR_TESTING
+#include "bpf/BpfMap.h"
+#include "bpf/BpfUtils.h"
+
+using ::testing::Test;
+
+namespace android {
+namespace bpf {
+
+using base::Result;
+using base::unique_fd;
+
+constexpr uint32_t TEST_MAP_SIZE = 10;
+constexpr uint32_t TEST_KEY1 = 1;
+constexpr uint32_t TEST_VALUE1 = 10;
+constexpr const char PINNED_MAP_PATH[] = "/sys/fs/bpf/testMap";
+
+class BpfMapTest : public testing::Test {
+  protected:
+    BpfMapTest() {}
+
+    void SetUp() {
+        EXPECT_EQ(0, setrlimitForTest());
+        if (!access(PINNED_MAP_PATH, R_OK)) {
+            EXPECT_EQ(0, remove(PINNED_MAP_PATH));
+        }
+    }
+
+    void TearDown() {
+        if (!access(PINNED_MAP_PATH, R_OK)) {
+            EXPECT_EQ(0, remove(PINNED_MAP_PATH));
+        }
+    }
+
+    void checkMapInvalid(BpfMap<uint32_t, uint32_t>& map) {
+        EXPECT_FALSE(map.isValid());
+        EXPECT_EQ(-1, map.getMap().get());
+    }
+
+    void checkMapValid(BpfMap<uint32_t, uint32_t>& map) {
+        EXPECT_LE(0, map.getMap().get());
+        EXPECT_TRUE(map.isValid());
+    }
+
+    void writeToMapAndCheck(BpfMap<uint32_t, uint32_t>& map, uint32_t key, uint32_t value) {
+        ASSERT_RESULT_OK(map.writeValue(key, value, BPF_ANY));
+        uint32_t value_read;
+        ASSERT_EQ(0, findMapEntry(map.getMap(), &key, &value_read));
+        checkValueAndStatus(value, value_read);
+    }
+
+    void checkValueAndStatus(uint32_t refValue, Result<uint32_t> value) {
+        ASSERT_RESULT_OK(value);
+        ASSERT_EQ(refValue, value.value());
+    }
+
+    void populateMap(uint32_t total, BpfMap<uint32_t, uint32_t>& map) {
+        for (uint32_t key = 0; key < total; key++) {
+            uint32_t value = key * 10;
+            EXPECT_RESULT_OK(map.writeValue(key, value, BPF_ANY));
+        }
+    }
+
+    void expectMapEmpty(BpfMap<uint32_t, uint32_t>& map) {
+        Result<bool> isEmpty = map.isEmpty();
+        ASSERT_RESULT_OK(isEmpty);
+        ASSERT_TRUE(isEmpty.value());
+    }
+};
+
+TEST_F(BpfMapTest, constructor) {
+    BpfMap<uint32_t, uint32_t> testMap1;
+    checkMapInvalid(testMap1);
+
+    BpfMap<uint32_t, uint32_t> testMap2;
+    ASSERT_RESULT_OK(testMap2.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    checkMapValid(testMap2);
+}
+
+TEST_F(BpfMapTest, basicHelpers) {
+    BpfMap<uint32_t, uint32_t> testMap;
+    ASSERT_RESULT_OK(testMap.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    uint32_t key = TEST_KEY1;
+    uint32_t value_write = TEST_VALUE1;
+    writeToMapAndCheck(testMap, key, value_write);
+    Result<uint32_t> value_read = testMap.readValue(key);
+    checkValueAndStatus(value_write, value_read);
+    Result<uint32_t> key_read = testMap.getFirstKey();
+    checkValueAndStatus(key, key_read);
+    ASSERT_RESULT_OK(testMap.deleteValue(key));
+    ASSERT_GT(0, findMapEntry(testMap.getMap(), &key, &value_read));
+    ASSERT_EQ(ENOENT, errno);
+}
+
+TEST_F(BpfMapTest, reset) {
+    BpfMap<uint32_t, uint32_t> testMap;
+    ASSERT_RESULT_OK(testMap.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    uint32_t key = TEST_KEY1;
+    uint32_t value_write = TEST_VALUE1;
+    writeToMapAndCheck(testMap, key, value_write);
+
+    testMap.reset(-1);
+    checkMapInvalid(testMap);
+    ASSERT_GT(0, findMapEntry(testMap.getMap(), &key, &value_write));
+    ASSERT_EQ(EBADF, errno);
+}
+
+TEST_F(BpfMapTest, moveConstructor) {
+    BpfMap<uint32_t, uint32_t> testMap1;
+    ASSERT_RESULT_OK(testMap1.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    BpfMap<uint32_t, uint32_t> testMap2;
+    testMap2 = std::move(testMap1);
+    uint32_t key = TEST_KEY1;
+    checkMapInvalid(testMap1);
+    uint32_t value = TEST_VALUE1;
+    writeToMapAndCheck(testMap2, key, value);
+}
+
+TEST_F(BpfMapTest, SetUpMap) {
+    EXPECT_NE(0, access(PINNED_MAP_PATH, R_OK));
+    BpfMap<uint32_t, uint32_t> testMap1;
+    ASSERT_RESULT_OK(testMap1.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    ASSERT_EQ(0, bpfFdPin(testMap1.getMap(), PINNED_MAP_PATH));
+    EXPECT_EQ(0, access(PINNED_MAP_PATH, R_OK));
+    checkMapValid(testMap1);
+    BpfMap<uint32_t, uint32_t> testMap2;
+    EXPECT_RESULT_OK(testMap2.init(PINNED_MAP_PATH));
+    checkMapValid(testMap2);
+    uint32_t key = TEST_KEY1;
+    uint32_t value = TEST_VALUE1;
+    writeToMapAndCheck(testMap1, key, value);
+    Result<uint32_t> value_read = testMap2.readValue(key);
+    checkValueAndStatus(value, value_read);
+}
+
+TEST_F(BpfMapTest, iterate) {
+    BpfMap<uint32_t, uint32_t> testMap;
+    ASSERT_RESULT_OK(testMap.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    populateMap(TEST_MAP_SIZE, testMap);
+    int totalCount = 0;
+    int totalSum = 0;
+    const auto iterateWithDeletion = [&totalCount, &totalSum](const uint32_t& key,
+                                                              BpfMap<uint32_t, uint32_t>& map) {
+        EXPECT_GE((uint32_t)TEST_MAP_SIZE, key);
+        totalCount++;
+        totalSum += key;
+        return map.deleteValue(key);
+    };
+    EXPECT_RESULT_OK(testMap.iterate(iterateWithDeletion));
+    EXPECT_EQ((int)TEST_MAP_SIZE, totalCount);
+    EXPECT_EQ(((1 + TEST_MAP_SIZE - 1) * (TEST_MAP_SIZE - 1)) / 2, (uint32_t)totalSum);
+    expectMapEmpty(testMap);
+}
+
+TEST_F(BpfMapTest, iterateWithValue) {
+    BpfMap<uint32_t, uint32_t> testMap;
+    ASSERT_RESULT_OK(testMap.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    populateMap(TEST_MAP_SIZE, testMap);
+    int totalCount = 0;
+    int totalSum = 0;
+    const auto iterateWithDeletion = [&totalCount, &totalSum](const uint32_t& key,
+                                                              const uint32_t& value,
+                                                              BpfMap<uint32_t, uint32_t>& map) {
+        EXPECT_GE((uint32_t)TEST_MAP_SIZE, key);
+        EXPECT_EQ(value, key * 10);
+        totalCount++;
+        totalSum += value;
+        return map.deleteValue(key);
+    };
+    EXPECT_RESULT_OK(testMap.iterateWithValue(iterateWithDeletion));
+    EXPECT_EQ((int)TEST_MAP_SIZE, totalCount);
+    EXPECT_EQ(((1 + TEST_MAP_SIZE - 1) * (TEST_MAP_SIZE - 1)) * 5, (uint32_t)totalSum);
+    expectMapEmpty(testMap);
+}
+
+TEST_F(BpfMapTest, mapIsEmpty) {
+    BpfMap<uint32_t, uint32_t> testMap;
+    ASSERT_RESULT_OK(testMap.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC));
+    expectMapEmpty(testMap);
+    uint32_t key = TEST_KEY1;
+    uint32_t value_write = TEST_VALUE1;
+    writeToMapAndCheck(testMap, key, value_write);
+    Result<bool> isEmpty = testMap.isEmpty();
+    ASSERT_RESULT_OK(isEmpty);
+    ASSERT_FALSE(isEmpty.value());
+    ASSERT_RESULT_OK(testMap.deleteValue(key));
+    ASSERT_GT(0, findMapEntry(testMap.getMap(), &key, &value_write));
+    ASSERT_EQ(ENOENT, errno);
+    expectMapEmpty(testMap);
+    int entriesSeen = 0;
+    EXPECT_RESULT_OK(testMap.iterate(
+            [&entriesSeen](const unsigned int&,
+                           const BpfMap<unsigned int, unsigned int>&) -> Result<void> {
+                entriesSeen++;
+                return {};
+            }));
+    EXPECT_EQ(0, entriesSeen);
+    EXPECT_RESULT_OK(testMap.iterateWithValue(
+            [&entriesSeen](const unsigned int&, const unsigned int&,
+                           const BpfMap<unsigned int, unsigned int>&) -> Result<void> {
+                entriesSeen++;
+                return {};
+            }));
+    EXPECT_EQ(0, entriesSeen);
+}
+
+TEST_F(BpfMapTest, mapClear) {
+    BpfMap<uint32_t, uint32_t> testMap;
+    ASSERT_RESULT_OK(testMap.resetMap(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE));
+    populateMap(TEST_MAP_SIZE, testMap);
+    Result<bool> isEmpty = testMap.isEmpty();
+    ASSERT_RESULT_OK(isEmpty);
+    ASSERT_FALSE(*isEmpty);
+    ASSERT_RESULT_OK(testMap.clear());
+    expectMapEmpty(testMap);
+}
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/BpfRingbufTest.cpp b/bpf/headers/BpfRingbufTest.cpp
new file mode 100644
index 0000000..e81fb92
--- /dev/null
+++ b/bpf/headers/BpfRingbufTest.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/file.h>
+#include <android-base/macros.h>
+#include <android-base/result-gmock.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "BpfSyscallWrappers.h"
+#include "bpf/BpfRingbuf.h"
+#include "bpf/BpfUtils.h"
+#include "bpf/KernelUtils.h"
+
+#define TEST_RINGBUF_MAGIC_NUM 12345
+
+namespace android {
+namespace bpf {
+using ::android::base::testing::HasError;
+using ::android::base::testing::HasValue;
+using ::android::base::testing::WithCode;
+using ::testing::AllOf;
+using ::testing::Gt;
+using ::testing::HasSubstr;
+using ::testing::Lt;
+
+class BpfRingbufTest : public ::testing::Test {
+ protected:
+  BpfRingbufTest()
+      : mProgPath("/sys/fs/bpf/prog_bpfRingbufProg_skfilter_ringbuf_test"),
+        mRingbufPath("/sys/fs/bpf/map_bpfRingbufProg_test_ringbuf") {}
+
+  void SetUp() {
+    if (!android::bpf::isAtLeastKernelVersion(5, 8, 0)) {
+      GTEST_SKIP() << "BPF ring buffers not supported below 5.8";
+    }
+
+    errno = 0;
+    mProgram.reset(retrieveProgram(mProgPath.c_str()));
+    EXPECT_EQ(errno, 0);
+    ASSERT_GE(mProgram.get(), 0)
+        << mProgPath << " was either not found or inaccessible.";
+  }
+
+  void RunProgram() {
+    char fake_skb[128] = {};
+    EXPECT_EQ(runProgram(mProgram, fake_skb, sizeof(fake_skb)), 0);
+  }
+
+  void RunTestN(int n) {
+    int run_count = 0;
+    uint64_t output = 0;
+    auto callback = [&](const uint64_t& value) {
+      output = value;
+      run_count++;
+    };
+
+    auto result = BpfRingbuf<uint64_t>::Create(mRingbufPath.c_str());
+    ASSERT_RESULT_OK(result);
+    EXPECT_TRUE(result.value()->isEmpty());
+
+    struct timespec t1, t2;
+    EXPECT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &t1));
+    EXPECT_FALSE(result.value()->wait(1000 /*ms*/));  // false because wait should timeout
+    EXPECT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &t2));
+    long long time1 = t1.tv_sec * 1000000000LL + t1.tv_nsec;
+    long long time2 = t2.tv_sec * 1000000000LL + t2.tv_nsec;
+    EXPECT_GE(time2 - time1, 1000000000 /*ns*/);  // 1000 ms as ns
+
+    for (int i = 0; i < n; i++) {
+      RunProgram();
+    }
+
+    EXPECT_FALSE(result.value()->isEmpty());
+
+    EXPECT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &t1));
+    EXPECT_TRUE(result.value()->wait());
+    EXPECT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &t2));
+    time1 = t1.tv_sec * 1000000000LL + t1.tv_nsec;
+    time2 = t2.tv_sec * 1000000000LL + t2.tv_nsec;
+    EXPECT_LE(time2 - time1, 1000000 /*ns*/);  // in x86 CF testing < 5000 ns
+
+    EXPECT_THAT(result.value()->ConsumeAll(callback), HasValue(n));
+    EXPECT_TRUE(result.value()->isEmpty());
+    EXPECT_EQ(output, TEST_RINGBUF_MAGIC_NUM);
+    EXPECT_EQ(run_count, n);
+  }
+
+  std::string mProgPath;
+  std::string mRingbufPath;
+  android::base::unique_fd mProgram;
+};
+
+TEST_F(BpfRingbufTest, ConsumeSingle) { RunTestN(1); }
+TEST_F(BpfRingbufTest, ConsumeMultiple) { RunTestN(3); }
+
+TEST_F(BpfRingbufTest, FillAndWrap) {
+  int run_count = 0;
+  auto callback = [&](const uint64_t&) { run_count++; };
+
+  auto result = BpfRingbuf<uint64_t>::Create(mRingbufPath.c_str());
+  ASSERT_RESULT_OK(result);
+
+  // 4kb buffer with 16 byte payloads (8 byte data, 8 byte header) should fill
+  // after 255 iterations. Exceed that so that some events are dropped.
+  constexpr int iterations = 300;
+  for (int i = 0; i < iterations; i++) {
+    RunProgram();
+  }
+
+  // Some events were dropped, but consume all that succeeded.
+  EXPECT_THAT(result.value()->ConsumeAll(callback),
+              HasValue(AllOf(Gt(250), Lt(260))));
+  EXPECT_THAT(run_count, AllOf(Gt(250), Lt(260)));
+
+  // After consuming everything, we should be able to use the ring buffer again.
+  run_count = 0;
+  RunProgram();
+  EXPECT_THAT(result.value()->ConsumeAll(callback), HasValue(1));
+  EXPECT_EQ(run_count, 1);
+}
+
+TEST_F(BpfRingbufTest, WrongTypeSize) {
+  // The program under test writes 8-byte uint64_t values so a ringbuffer for
+  // 1-byte uint8_t values will fail to read from it. Note that the map_def does
+  // not specify the value size, so we fail on read, not creation.
+  auto result = BpfRingbuf<uint8_t>::Create(mRingbufPath.c_str());
+  ASSERT_RESULT_OK(result);
+
+  RunProgram();
+
+  EXPECT_THAT(result.value()->ConsumeAll([](const uint8_t&) {}),
+              HasError(WithCode(EMSGSIZE)));
+}
+
+TEST_F(BpfRingbufTest, InvalidPath) {
+  EXPECT_THAT(BpfRingbuf<int>::Create("/sys/fs/bpf/bad_path"),
+              HasError(WithCode(ENOENT)));
+}
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/TEST_MAPPING b/bpf/headers/TEST_MAPPING
new file mode 100644
index 0000000..9ec8a40
--- /dev/null
+++ b/bpf/headers/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "presubmit": [
+    {
+      "name": "libbpf_android_test"
+    }
+  ]
+}
diff --git a/bpf/headers/include/bpf/BpfClassic.h b/bpf/headers/include/bpf/BpfClassic.h
new file mode 100644
index 0000000..81be37d
--- /dev/null
+++ b/bpf/headers/include/bpf/BpfClassic.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+// Accept the full packet
+#define BPF_ACCEPT BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFF)
+
+// Reject the packet
+#define BPF_REJECT BPF_STMT(BPF_RET | BPF_K, 0)
+
+// Note arguments to BPF_JUMP(opcode, operand, true_offset, false_offset)
+
+// If not equal, jump over count instructions
+#define BPF_JUMP_IF_NOT_EQUAL(v, count) \
+	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (v), 0, (count))
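+// e.g. BPF_JUMP_IF_NOT_EQUAL(ETH_P_IP, 2) skips the next two instructions
+// whenever the accumulator A does not hold ETH_P_IP.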
+
+// *TWO* instructions: compare and if not equal jump over the accept statement
+#define BPF2_ACCEPT_IF_EQUAL(v) \
+	BPF_JUMP_IF_NOT_EQUAL((v), 1), \
+	BPF_ACCEPT
+
+// *TWO* instructions: compare and if equal jump over the reject statement
+#define BPF2_REJECT_IF_NOT_EQUAL(v) \
+	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (v), 1, 0), \
+	BPF_REJECT
+
+// *TWO* instructions: compare and if greater or equal jump over the reject statement
+#define BPF2_REJECT_IF_LESS_THAN(v) \
+	BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, (v), 1, 0), \
+	BPF_REJECT
+
+// *TWO* instructions: compare and if *NOT* greater jump over the reject statement
+#define BPF2_REJECT_IF_GREATER_THAN(v) \
+	BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, (v), 0, 1), \
+	BPF_REJECT
+
+// *THREE* instructions: compare and if in range [lo, hi], jump over the reject statement
+#define BPF3_REJECT_IF_NOT_IN_RANGE(lo, hi) \
+	BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, (lo), 0, 1), \
+	BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, (hi), 0, 1), \
+	BPF_REJECT
+
+// *TWO* instructions: compare and if none of the bits are set jump over the reject statement
+#define BPF2_REJECT_IF_ANY_MASKED_BITS_SET(v) \
+	BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, (v), 0, 1), \
+	BPF_REJECT
+
+// loads skb->protocol
+#define BPF_LOAD_SKB_PROTOCOL \
+	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, (__u32)SKF_AD_OFF + SKF_AD_PROTOCOL)
+
+// 8-bit load relative to start of link layer (mac/ethernet) header.
+#define BPF_LOAD_MAC_RELATIVE_U8(ofs) \
+	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, (__u32)SKF_LL_OFF + (ofs))
+
+// Big/Network Endian 16-bit load relative to start of link layer (mac/ethernet) header.
+#define BPF_LOAD_MAC_RELATIVE_BE16(ofs) \
+	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, (__u32)SKF_LL_OFF + (ofs))
+
+// Big/Network Endian 32-bit load relative to start of link layer (mac/ethernet) header.
+#define BPF_LOAD_MAC_RELATIVE_BE32(ofs) \
+	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, (__u32)SKF_LL_OFF + (ofs))
+
+// 8-bit load relative to start of network (IPv4/IPv6) header.
+#define BPF_LOAD_NET_RELATIVE_U8(ofs) \
+	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, (__u32)SKF_NET_OFF + (ofs))
+
+// Big/Network Endian 16-bit load relative to start of network (IPv4/IPv6) header.
+#define BPF_LOAD_NET_RELATIVE_BE16(ofs) \
+	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, (__u32)SKF_NET_OFF + (ofs))
+
+// Big/Network Endian 32-bit load relative to start of network (IPv4/IPv6) header.
+#define BPF_LOAD_NET_RELATIVE_BE32(ofs) \
+	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, (__u32)SKF_NET_OFF + (ofs))
+
+#define field_sizeof(struct_type,field) sizeof(((struct_type *)0)->field)
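+// e.g. field_sizeof(struct iphdr, saddr) == 4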
+
+// 8-bit load from IPv4 header field.
+#define BPF_LOAD_IPV4_U8(field) \
+	BPF_LOAD_NET_RELATIVE_U8(({ \
+	  _Static_assert(field_sizeof(struct iphdr, field) == 1, "field of wrong size"); \
+	  offsetof(iphdr, field); \
+	}))
+
+// Big/Network Endian 16-bit load from IPv4 header field.
+#define BPF_LOAD_IPV4_BE16(field) \
+	BPF_LOAD_NET_RELATIVE_BE16(({ \
+	  _Static_assert(field_sizeof(struct iphdr, field) == 2, "field of wrong size"); \
+	  offsetof(iphdr, field); \
+	}))
+
+// Big/Network Endian 32-bit load from IPv4 header field.
+#define BPF_LOAD_IPV4_BE32(field) \
+	BPF_LOAD_NET_RELATIVE_BE32(({ \
+	  _Static_assert(field_sizeof(struct iphdr, field) == 4, "field of wrong size"); \
+	  offsetof(iphdr, field); \
+	}))
+
+// 8-bit load from IPv6 header field.
+#define BPF_LOAD_IPV6_U8(field) \
+	BPF_LOAD_NET_RELATIVE_U8(({ \
+	  _Static_assert(field_sizeof(struct ipv6hdr, field) == 1, "field of wrong size"); \
+	  offsetof(ipv6hdr, field); \
+	}))
+
+// Big/Network Endian 16-bit load from IPv6 header field.
+#define BPF_LOAD_IPV6_BE16(field) \
+	BPF_LOAD_NET_RELATIVE_BE16(({ \
+	  _Static_assert(field_sizeof(struct ipv6hdr, field) == 2, "field of wrong size"); \
+	  offsetof(ipv6hdr, field); \
+	}))
+
+// Big/Network Endian 32-bit load from IPv6 header field.
+#define BPF_LOAD_IPV6_BE32(field) \
+	BPF_LOAD_NET_RELATIVE_BE32(({ \
+	  _Static_assert(field_sizeof(struct ipv6hdr, field) == 4, "field of wrong size"); \
+	  offsetof(ipv6hdr, field); \
+	}))
+
+// Load the length of the IPv4 header into X index register.
+// i.e. X := 4 * IPv4.IHL, where IPv4.IHL is the bottom nibble
+// of the first byte of the IPv4 (aka network layer) header.
+#define BPF_LOADX_NET_RELATIVE_IPV4_HLEN \
+    BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, (__u32)SKF_NET_OFF)
+
+// Blindly assumes no IPv6 extension headers, just does X := 40
+// You may later adjust this as you parse through IPv6 ext hdrs.
+#define BPF_LOADX_CONSTANT_IPV6_HLEN \
+    BPF_STMT(BPF_LDX | BPF_W | BPF_IMM, sizeof(struct ipv6hdr))
+
+// NOTE: all the following require X to be setup correctly (v4: 20+, v6: 40+)
+
+// 8-bit load from L4 (TCP/UDP/...) header
+#define BPF_LOAD_NETX_RELATIVE_L4_U8(ofs) \
+    BPF_STMT(BPF_LD | BPF_B | BPF_IND, (__u32)SKF_NET_OFF + (ofs))
+
+// Big/Network Endian 16-bit load from L4 (TCP/UDP/...) header
+#define BPF_LOAD_NETX_RELATIVE_L4_BE16(ofs) \
+    BPF_STMT(BPF_LD | BPF_H | BPF_IND, (__u32)SKF_NET_OFF + (ofs))
+
+// Big/Network Endian 32-bit load from L4 (TCP/UDP/...) header
+#define BPF_LOAD_NETX_RELATIVE_L4_BE32(ofs) \
+    BPF_STMT(BPF_LD | BPF_W | BPF_IND, (__u32)SKF_NET_OFF + (ofs))
+
+// Both ICMPv4 and ICMPv6 start with u8 type, u8 code
+#define BPF_LOAD_NETX_RELATIVE_ICMP_TYPE BPF_LOAD_NETX_RELATIVE_L4_U8(0)
+#define BPF_LOAD_NETX_RELATIVE_ICMP_CODE BPF_LOAD_NETX_RELATIVE_L4_U8(1)
+
+// IPv6 extension headers (HOPOPTS, DSTOPTS, FRAG) begin with a u8 nexthdr
+#define BPF_LOAD_NETX_RELATIVE_V6EXTHDR_NEXTHDR BPF_LOAD_NETX_RELATIVE_L4_U8(0)
+
+// IPv6 fragment header is always exactly 8 bytes long
+#define BPF_LOAD_CONSTANT_V6FRAGHDR_LEN \
+    BPF_STMT(BPF_LD | BPF_IMM, 8)
+
+// HOPOPTS/DSTOPTS follow up with 'u8 len', counting 8-byte units (0->8, 1->16)
+// *THREE* instructions
+#define BPF3_LOAD_NETX_RELATIVE_V6EXTHDR_LEN \
+    BPF_LOAD_NETX_RELATIVE_L4_U8(1), \
+    BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), \
+    BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 3)
+
+// *TWO* instructions: A += X; X := A
+#define BPF2_ADD_A_TO_X \
+    BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), \
+    BPF_STMT(BPF_MISC | BPF_TAX, 0)
+
+// UDP/UDPLITE/TCP/SCTP/DCCP all start with be16 srcport, dstport
+#define BPF_LOAD_NETX_RELATIVE_SRC_PORT BPF_LOAD_NETX_RELATIVE_L4_BE16(0)
+#define BPF_LOAD_NETX_RELATIVE_DST_PORT BPF_LOAD_NETX_RELATIVE_L4_BE16(2)
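+
+// Illustrative composition (a sketch only, assuming <linux/filter.h> and
+// <linux/ip.h> are included and the socket only sees IPv4 packets):
+// a cBPF filter that accepts only UDP packets to destination port 53.
+//
+//   struct sock_filter dns4_filter[] = {
+//       BPF_LOAD_IPV4_U8(protocol),             // A := ip.protocol
+//       BPF2_REJECT_IF_NOT_EQUAL(IPPROTO_UDP),  // drop non-UDP
+//       BPF_LOADX_NET_RELATIVE_IPV4_HLEN,       // X := 4 * ip.ihl
+//       BPF_LOAD_NETX_RELATIVE_DST_PORT,        // A := udp.dst_port
+//       BPF2_REJECT_IF_NOT_EQUAL(53),           // drop non-DNS
+//       BPF_ACCEPT,
+//   };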
diff --git a/bpf/headers/include/bpf/BpfMap.h b/bpf/headers/include/bpf/BpfMap.h
new file mode 100644
index 0000000..1037beb
--- /dev/null
+++ b/bpf/headers/include/bpf/BpfMap.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <linux/bpf.h>
+
+#include <android/log.h>
+#include <android-base/result.h>
+#include <android-base/stringprintf.h>
+#include <android-base/unique_fd.h>
+
+#include "BpfSyscallWrappers.h"
+#include "bpf/BpfUtils.h"
+
+#include <functional>
+
+namespace android {
+namespace bpf {
+
+using base::Result;
+using base::unique_fd;
+using std::function;
+
+// This is a class wrapper for eBPF maps. An eBPF map is a special in-kernel
+// data structure that stores data in <Key, Value> pairs. It can be read and
+// written from userspace via syscalls on the map file descriptor. This class
+// generalizes the procedure of interacting with eBPF maps and hides the
+// implementation details from other processes. Besides the basic syscall
+// wrappers, it also provides helper functions to iterate over the map more
+// easily.
+//
+// NOTE: A kernel eBPF map may be accessed by both kernel and userspace
+// processes at the same time. Likewise, if the map is pinned as a virtual
+// file, it can be opened by multiple map class objects and accessed
+// concurrently. Though the map class object and the underlying kernel map are
+// thread safe, it is not safe to iterate over a map while another thread or
+// process is deleting from it. In this case the iteration can return
+// duplicate entries.
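+//
+// Illustrative usage (a sketch only; the pinned path is hypothetical):
+//   BpfMapRO<uint32_t, uint64_t> map;
+//   if (map.init("/sys/fs/bpf/map_example").ok()) {
+//     auto v = map.readValue(42);
+//     if (v.ok()) { /* use v.value() */ }
+//   }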
+template <class Key, class Value>
+class BpfMapRO {
+  public:
+    BpfMapRO<Key, Value>() {};
+
+    // explicitly force no copy constructor, since it would need to dup the fd
+    // (later on, for testing, we still make available a copy assignment operator)
+    BpfMapRO<Key, Value>(const BpfMapRO<Key, Value>&) = delete;
+
+  protected:
+    void abortOnMismatch(bool writable) const {
+        if (!mMapFd.ok()) abort();
+        if (isAtLeastKernelVersion(4, 14, 0)) {
+            int flags = bpfGetFdMapFlags(mMapFd);
+            if (flags < 0) abort();
+            if (flags & BPF_F_WRONLY) abort();
+            if (writable && (flags & BPF_F_RDONLY)) abort();
+            if (bpfGetFdKeySize(mMapFd) != sizeof(Key)) abort();
+            if (bpfGetFdValueSize(mMapFd) != sizeof(Value)) abort();
+        }
+    }
+
+  public:
+    explicit BpfMapRO<Key, Value>(const char* pathname) {
+        mMapFd.reset(mapRetrieveRO(pathname));
+        abortOnMismatch(/* writable */ false);
+    }
+
+    Result<Key> getFirstKey() const {
+        Key firstKey;
+        if (getFirstMapKey(mMapFd, &firstKey)) {
+            return ErrnoErrorf("BpfMap::getFirstKey() failed");
+        }
+        return firstKey;
+    }
+
+    Result<Key> getNextKey(const Key& key) const {
+        Key nextKey;
+        if (getNextMapKey(mMapFd, &key, &nextKey)) {
+            return ErrnoErrorf("BpfMap::getNextKey() failed");
+        }
+        return nextKey;
+    }
+
+    Result<Value> readValue(const Key key) const {
+        Value value;
+        if (findMapEntry(mMapFd, &key, &value)) {
+            return ErrnoErrorf("BpfMap::readValue() failed");
+        }
+        return value;
+    }
+
+  protected:
+    [[clang::reinitializes]] Result<void> init(const char* path, int fd, bool writable) {
+        mMapFd.reset(fd);
+        if (!mMapFd.ok()) {
+            return ErrnoErrorf("Pinned map not accessible or does not exist: ({})", path);
+        }
+        // Normally we should return an error here instead of calling abort,
+        // but this cannot happen at runtime without a massive code bug (K/V type mismatch)
+        // and as such it's better to just blow the system up and let the developer fix it.
+        // Crashes are much more likely to be noticed than logs and missing functionality.
+        abortOnMismatch(writable);
+        return {};
+    }
+
+  public:
+    // Function that tries to get the map from a pinned path.
+    [[clang::reinitializes]] Result<void> init(const char* path) {
+        return init(path, mapRetrieveRO(path), /* writable */ false);
+    }
+
+    // Iterate through the map and handle each key retrieved based on the filter
+    // without modification of map content.
+    Result<void> iterate(
+            const function<Result<void>(const Key& key,
+                                        const BpfMapRO<Key, Value>& map)>& filter) const;
+
+    // Iterate through the map and get each <key, value> pair, handle each <key,
+    // value> pair based on the filter without modification of map content.
+    Result<void> iterateWithValue(
+            const function<Result<void>(const Key& key, const Value& value,
+                                        const BpfMapRO<Key, Value>& map)>& filter) const;
+
+#ifdef BPF_MAP_MAKE_VISIBLE_FOR_TESTING
+    const unique_fd& getMap() const { return mMapFd; };
+
+    // Copy assignment operator - due to need for fd duping, should not be used in non-test code.
+    BpfMapRO<Key, Value>& operator=(const BpfMapRO<Key, Value>& other) {
+        if (this != &other) mMapFd.reset(fcntl(other.mMapFd.get(), F_DUPFD_CLOEXEC, 0));
+        return *this;
+    }
+#else
+    BpfMapRO<Key, Value>& operator=(const BpfMapRO<Key, Value>&) = delete;
+#endif
+
+    // Move assignment operator
+    BpfMapRO<Key, Value>& operator=(BpfMapRO<Key, Value>&& other) noexcept {
+        if (this != &other) {
+            mMapFd = std::move(other.mMapFd);
+            other.reset();
+        }
+        return *this;
+    }
+
+#ifdef BPF_MAP_MAKE_VISIBLE_FOR_TESTING
+    // Note that unique_fd.reset() carefully saves and restores the errno,
+    // and BpfMap.reset() likewise won't touch errno if the passed-in fd is negative,
+    // hence you can do something like BpfMap.reset(systemcall()) and then
+    // check BpfMap.isValid() and look at errno to see why systemcall() failed.
+    [[clang::reinitializes]] void reset(int fd) {
+        mMapFd.reset(fd);
+        if (mMapFd.ok()) abortOnMismatch(/* writable */ false);  // false isn't ideal
+    }
+
+    // unique_fd has an implicit int conversion defined, which combined with the above
+    // reset(int) would result in double ownership of the fd, hence we either need a custom
+    // implementation of reset(unique_fd), or to delete it and thus cause compile failures
+    // to catch this and prevent it.
+    void reset(unique_fd fd) = delete;
+#endif
+
+    [[clang::reinitializes]] void reset() {
+        mMapFd.reset();
+    }
+
+    bool isValid() const { return mMapFd.ok(); }
+
+    Result<bool> isEmpty() const {
+        auto key = getFirstKey();
+        if (key.ok()) return false;
+        if (key.error().code() == ENOENT) return true;
+        return key.error();
+    }
+
+  protected:
+    unique_fd mMapFd;
+};
+
+template <class Key, class Value>
+Result<void> BpfMapRO<Key, Value>::iterate(
+        const function<Result<void>(const Key& key,
+                                    const BpfMapRO<Key, Value>& map)>& filter) const {
+    Result<Key> curKey = getFirstKey();
+    while (curKey.ok()) {
+        const Result<Key>& nextKey = getNextKey(curKey.value());
+        Result<void> status = filter(curKey.value(), *this);
+        if (!status.ok()) return status;
+        curKey = nextKey;
+    }
+    if (curKey.error().code() == ENOENT) return {};
+    return curKey.error();
+}
+
+template <class Key, class Value>
+Result<void> BpfMapRO<Key, Value>::iterateWithValue(
+        const function<Result<void>(const Key& key, const Value& value,
+                                    const BpfMapRO<Key, Value>& map)>& filter) const {
+    Result<Key> curKey = getFirstKey();
+    while (curKey.ok()) {
+        const Result<Key>& nextKey = getNextKey(curKey.value());
+        Result<Value> curValue = readValue(curKey.value());
+        if (!curValue.ok()) return curValue.error();
+        Result<void> status = filter(curKey.value(), curValue.value(), *this);
+        if (!status.ok()) return status;
+        curKey = nextKey;
+    }
+    if (curKey.error().code() == ENOENT) return {};
+    return curKey.error();
+}
+
+template <class Key, class Value>
+class BpfMap : public BpfMapRO<Key, Value> {
+  protected:
+    using BpfMapRO<Key, Value>::mMapFd;
+    using BpfMapRO<Key, Value>::abortOnMismatch;
+
+  public:
+    using BpfMapRO<Key, Value>::getFirstKey;
+    using BpfMapRO<Key, Value>::getNextKey;
+    using BpfMapRO<Key, Value>::readValue;
+
+    BpfMap<Key, Value>() {};
+
+    explicit BpfMap<Key, Value>(const char* pathname) {
+        mMapFd.reset(mapRetrieveRW(pathname));
+        abortOnMismatch(/* writable */ true);
+    }
+
+    // Function that tries to get the map from a pinned path.
+    [[clang::reinitializes]] Result<void> init(const char* path) {
+        return BpfMapRO<Key,Value>::init(path, mapRetrieveRW(path), /* writable */ true);
+    }
+
+    Result<void> writeValue(const Key& key, const Value& value, uint64_t flags) {
+        if (writeToMapEntry(mMapFd, &key, &value, flags)) {
+            return ErrnoErrorf("BpfMap::writeValue() failed");
+        }
+        return {};
+    }
+
+    Result<void> deleteValue(const Key& key) {
+        if (deleteMapEntry(mMapFd, &key)) {
+            return ErrnoErrorf("BpfMap::deleteValue() failed");
+        }
+        return {};
+    }
+
+    Result<void> clear() {
+        while (true) {
+            auto key = getFirstKey();
+            if (!key.ok()) {
+                if (key.error().code() == ENOENT) return {};  // empty: success
+                return key.error();                           // Anything else is an error
+            }
+            auto res = deleteValue(key.value());
+            if (!res.ok()) {
+                // Someone else could have deleted the key, so ignore ENOENT
+                if (res.error().code() == ENOENT) continue;
+                ALOGE("Failed to delete data %s", strerror(res.error().code()));
+                return res.error();
+            }
+        }
+    }
+
+#ifdef BPF_MAP_MAKE_VISIBLE_FOR_TESTING
+    [[clang::reinitializes]] Result<void> resetMap(bpf_map_type map_type,
+                                                   uint32_t max_entries,
+                                                   uint32_t map_flags = 0) {
+        if (map_flags & BPF_F_WRONLY) abort();
+        if (map_flags & BPF_F_RDONLY) abort();
+        mMapFd.reset(createMap(map_type, sizeof(Key), sizeof(Value), max_entries,
+                               map_flags));
+        if (!mMapFd.ok()) return ErrnoErrorf("BpfMap::resetMap() failed");
+        abortOnMismatch(/* writable */ true);
+        return {};
+    }
+#endif
+
+    // Iterate through the map and handle each key retrieved based on the filter
+    // without modification of map content.
+    Result<void> iterate(
+            const function<Result<void>(const Key& key,
+                                        const BpfMap<Key, Value>& map)>& filter) const;
+
+    // Iterate through the map and get each <key, value> pair, handle each <key,
+    // value> pair based on the filter without modification of map content.
+    Result<void> iterateWithValue(
+            const function<Result<void>(const Key& key, const Value& value,
+                                        const BpfMap<Key, Value>& map)>& filter) const;
+
+    // Iterate through the map and handle each key retrieved based on the filter
+    Result<void> iterate(
+            const function<Result<void>(const Key& key,
+                                        BpfMap<Key, Value>& map)>& filter);
+
+    // Iterate through the map and get each <key, value> pair, handle each <key,
+    // value> pair based on the filter.
+    Result<void> iterateWithValue(
+            const function<Result<void>(const Key& key, const Value& value,
+                                        BpfMap<Key, Value>& map)>& filter);
+
+};
+
+template <class Key, class Value>
+Result<void> BpfMap<Key, Value>::iterate(
+        const function<Result<void>(const Key& key,
+                                    const BpfMap<Key, Value>& map)>& filter) const {
+    Result<Key> curKey = getFirstKey();
+    while (curKey.ok()) {
+        const Result<Key>& nextKey = getNextKey(curKey.value());
+        Result<void> status = filter(curKey.value(), *this);
+        if (!status.ok()) return status;
+        curKey = nextKey;
+    }
+    if (curKey.error().code() == ENOENT) return {};
+    return curKey.error();
+}
+
+template <class Key, class Value>
+Result<void> BpfMap<Key, Value>::iterateWithValue(
+        const function<Result<void>(const Key& key, const Value& value,
+                                    const BpfMap<Key, Value>& map)>& filter) const {
+    Result<Key> curKey = getFirstKey();
+    while (curKey.ok()) {
+        const Result<Key>& nextKey = getNextKey(curKey.value());
+        Result<Value> curValue = readValue(curKey.value());
+        if (!curValue.ok()) return curValue.error();
+        Result<void> status = filter(curKey.value(), curValue.value(), *this);
+        if (!status.ok()) return status;
+        curKey = nextKey;
+    }
+    if (curKey.error().code() == ENOENT) return {};
+    return curKey.error();
+}
+
+template <class Key, class Value>
+Result<void> BpfMap<Key, Value>::iterate(
+        const function<Result<void>(const Key& key,
+                                    BpfMap<Key, Value>& map)>& filter) {
+    Result<Key> curKey = getFirstKey();
+    while (curKey.ok()) {
+        const Result<Key>& nextKey = getNextKey(curKey.value());
+        Result<void> status = filter(curKey.value(), *this);
+        if (!status.ok()) return status;
+        curKey = nextKey;
+    }
+    if (curKey.error().code() == ENOENT) return {};
+    return curKey.error();
+}
+
+template <class Key, class Value>
+Result<void> BpfMap<Key, Value>::iterateWithValue(
+        const function<Result<void>(const Key& key, const Value& value,
+                                    BpfMap<Key, Value>& map)>& filter) {
+    Result<Key> curKey = getFirstKey();
+    while (curKey.ok()) {
+        const Result<Key>& nextKey = getNextKey(curKey.value());
+        Result<Value> curValue = readValue(curKey.value());
+        if (!curValue.ok()) return curValue.error();
+        Result<void> status = filter(curKey.value(), curValue.value(), *this);
+        if (!status.ok()) return status;
+        curKey = nextKey;
+    }
+    if (curKey.error().code() == ENOENT) return {};
+    return curKey.error();
+}
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/include/bpf/BpfRingbuf.h b/bpf/headers/include/bpf/BpfRingbuf.h
new file mode 100644
index 0000000..4bcd259
--- /dev/null
+++ b/bpf/headers/include/bpf/BpfRingbuf.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/result.h>
+#include <android-base/unique_fd.h>
+#include <linux/bpf.h>
+#include <poll.h>
+#include <sys/epoll.h>
+#include <sys/mman.h>
+#include <utils/Log.h>
+
+#include "bpf/BpfUtils.h"
+
+#include <atomic>
+
+namespace android {
+namespace bpf {
+
+// BpfRingbufBase contains the non-templated functionality of BPF ring buffers.
+class BpfRingbufBase {
+ public:
+  virtual ~BpfRingbufBase() {
+    if (mConsumerPos) munmap(mConsumerPos, mConsumerSize);
+    if (mProducerPos) munmap(mProducerPos, mProducerSize);
+    mConsumerPos = nullptr;
+    mProducerPos = nullptr;
+  }
+
+  bool isEmpty(void);
+
+  // returns !isEmpty() for convenience
+  bool wait(int timeout_ms = -1);
+
+ protected:
+  // Non-initializing constructor, used by Create.
+  BpfRingbufBase(size_t value_size) : mValueSize(value_size) {}
+
+  // Full construction that aborts on error (use Create/Init to handle errors).
+  BpfRingbufBase(const char* path, size_t value_size) : mValueSize(value_size) {
+    if (auto status = Init(path); !status.ok()) {
+      ALOGE("BpfRingbuf init failed: %s", status.error().message().c_str());
+      abort();
+    }
+  }
+
+  // Delete copy constructor (class owns raw pointers).
+  BpfRingbufBase(const BpfRingbufBase&) = delete;
+
+  // Initialize the base ringbuffer components. Must be called exactly once.
+  base::Result<void> Init(const char* path);
+
+  // Consumes all messages from the ring buffer, passing them to the callback.
+  base::Result<int> ConsumeAll(
+      const std::function<void(const void*)>& callback);
+
+  // Replicates C-style void* "byte-wise" pointer addition.
+  template <typename Ptr>
+  static Ptr pointerAddBytes(void* base, ssize_t offset_bytes) {
+    return reinterpret_cast<Ptr>(reinterpret_cast<char*>(base) + offset_bytes);
+  }
+
+  // Rounds len by clearing bitmask, adding header, and aligning to 8 bytes.
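+  // e.g. a committed 8-byte sample (len == 8) rounds to 8 + BPF_RINGBUF_HDR_SZ = 16.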
+  static uint32_t roundLength(uint32_t len) {
+    len &= ~(BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
+    len += BPF_RINGBUF_HDR_SZ;
+    return (len + 7) & ~7;
+  }
+
+  const size_t mValueSize;
+
+  size_t mConsumerSize;
+  size_t mProducerSize;
+  unsigned long mPosMask;
+  android::base::unique_fd mRingFd;
+
+  void* mDataPos = nullptr;
+  // The kernel uses an "unsigned long" type for both consumer and producer position.
+  // Unsigned long is a 4 byte value on a 32-bit kernel, and an 8 byte value on a 64-bit kernel.
+  // To support 32-bit kernels, producer pos is capped at 4 bytes (despite it being 8 bytes on
+  // 64-bit kernels) and all comparisons of consumer and producer pos only compare the low-order 4
+  // bytes (an inequality comparison is performed to support overflow).
+  // This solution is bitness agnostic. The consumer only increments the 8 byte consumer pos, which,
+  // in a little-endian architecture, is safe since the entire page is mapped into memory and a
+  // 32-bit kernel will just ignore the high-order bits.
+  std::atomic_uint64_t* mConsumerPos = nullptr;
+  std::atomic_uint32_t* mProducerPos = nullptr;
+
+  // In order to guarantee atomic access in a 32 bit userspace environment, atomic_uint64_t is used
+  // together with the std::atomic<T>::is_always_lock_free check, which guarantees that read / write
+  // operations are indeed atomic.
+  // Since std::atomic does not support wrapping preallocated memory, an additional static assert on
+  // the size of the atomic and the underlying type is added to ensure a reinterpret_cast from type
+  // to its atomic version is safe (is_always_lock_free being true should provide additional
+  // confidence).
+  static_assert(std::atomic_uint64_t::is_always_lock_free);
+  static_assert(std::atomic_uint32_t::is_always_lock_free);
+  static_assert(sizeof(std::atomic_uint64_t) == sizeof(uint64_t));
+  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t));
+};
+
+// This is a class wrapper for eBPF ring buffers. An eBPF ring buffer is a
+// special type of eBPF map used for sending messages from eBPF to userspace.
+// The implementation relies on fast shared memory and atomics for the producer
+// and consumer management. Ring buffers are a faster alternative to eBPF perf
+// buffers.
+//
+// This class is thread compatible, but not thread safe.
+//
+// Note: A kernel eBPF ring buffer may be accessed by both kernel and userspace
+// processes at the same time. However, the userspace consumers of a given ring
+// buffer all share a single read pointer. There is no guarantee which readers
+// will read which messages.
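+//
+// Illustrative usage (a sketch only; the pinned path and Event type are
+// hypothetical):
+//   auto rb = BpfRingbuf<Event>::Create("/sys/fs/bpf/map_example_ringbuf");
+//   if (rb.ok()) {
+//     rb.value()->wait(1000 /*ms*/);
+//     rb.value()->ConsumeAll([](const Event& e) { /* handle e */ });
+//   }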
+template <typename Value>
+class BpfRingbuf : public BpfRingbufBase {
+ public:
+  using MessageCallback = std::function<void(const Value&)>;
+
+  // Creates a ringbuffer wrapper from a pinned path. This initialization will
+  // abort on error. To handle errors, initialize with Create instead.
+  BpfRingbuf(const char* path) : BpfRingbufBase(path, sizeof(Value)) {}
+
+  // Creates a ringbuffer wrapper from a pinned path. There are no guarantees
+  // that the ringbuf outputs messages of type `Value`, only that they are the
+  // same size. Size is only checked in ConsumeAll.
+  static base::Result<std::unique_ptr<BpfRingbuf<Value>>> Create(
+      const char* path);
+
+  int epoll_ctl_add(int epfd, struct epoll_event *event) {
+    return epoll_ctl(epfd, EPOLL_CTL_ADD, mRingFd.get(), event);
+  }
+
+  int epoll_ctl_mod(int epfd, struct epoll_event *event) {
+    return epoll_ctl(epfd, EPOLL_CTL_MOD, mRingFd.get(), event);
+  }
+
+  int epoll_ctl_del(int epfd) {
+    return epoll_ctl(epfd, EPOLL_CTL_DEL, mRingFd.get(), NULL);
+  }
+
+  // Consumes all messages from the ring buffer, passing them to the callback.
+  // Returns the number of messages consumed or a non-ok result on error. If the
+  // ring buffer has no pending messages an OK result with count 0 is returned.
+  base::Result<int> ConsumeAll(const MessageCallback& callback);
+
+ protected:
+  // Empty ctor for use by Create.
+  BpfRingbuf() : BpfRingbufBase(sizeof(Value)) {}
+};
+
+
+inline base::Result<void> BpfRingbufBase::Init(const char* path) {
+  mRingFd.reset(mapRetrieveExclusiveRW(path));
+  if (!mRingFd.ok()) {
+    return android::base::ErrnoError()
+           << "failed to retrieve ringbuffer at " << path;
+  }
+
+  int map_type = android::bpf::bpfGetFdMapType(mRingFd);
+  if (map_type != BPF_MAP_TYPE_RINGBUF) {
+    errno = EINVAL;
+    return android::base::ErrnoError()
+           << "bpf map has wrong type: want BPF_MAP_TYPE_RINGBUF ("
+           << BPF_MAP_TYPE_RINGBUF << ") got " << map_type;
+  }
+
+  int max_entries = android::bpf::bpfGetFdMaxEntries(mRingFd);
+  if (max_entries < 0) {
+    return android::base::ErrnoError()
+           << "failed to read max_entries from ringbuf";
+  }
+  if (max_entries == 0) {
+    errno = EINVAL;
+    return android::base::ErrnoError() << "max_entries must be non-zero";
+  }
+
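+  // For BPF_MAP_TYPE_RINGBUF the kernel requires max_entries (the data area
+  // size) to be a power of two and a multiple of the page size, so
+  // max_entries - 1 works as a position mask.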
+  mPosMask = max_entries - 1;
+  mConsumerSize = getpagesize();
+  mProducerSize = getpagesize() + 2 * max_entries;
+
+  {
+    void* ptr = mmap(NULL, mConsumerSize, PROT_READ | PROT_WRITE, MAP_SHARED,
+                     mRingFd, 0);
+    if (ptr == MAP_FAILED) {
+      return android::base::ErrnoError()
+             << "failed to mmap ringbuf consumer pages";
+    }
+    mConsumerPos = reinterpret_cast<decltype(mConsumerPos)>(ptr);
+  }
+
+  {
+    void* ptr = mmap(NULL, mProducerSize, PROT_READ, MAP_SHARED, mRingFd,
+                     mConsumerSize);
+    if (ptr == MAP_FAILED) {
+      return android::base::ErrnoError()
+             << "failed to mmap ringbuf producer page";
+    }
+    mProducerPos = reinterpret_cast<decltype(mProducerPos)>(ptr);
+  }
+
+  mDataPos = pointerAddBytes<void*>(mProducerPos, getpagesize());
+  return {};
+}
+
+inline bool BpfRingbufBase::isEmpty(void) {
+  uint32_t prod_pos = mProducerPos->load(std::memory_order_relaxed);
+  uint64_t cons_pos = mConsumerPos->load(std::memory_order_relaxed);
+  return (cons_pos & 0xFFFFFFFF) == prod_pos;
+}
+
+inline bool BpfRingbufBase::wait(int timeout_ms) {
+  // possible optimization: if (!isEmpty()) return true;
+  struct pollfd pfd = {  // 1-element array
+    .fd = mRingFd.get(),
+    .events = POLLIN,
+  };
+  (void)poll(&pfd, 1, timeout_ms);  // 'best effort' poll
+  return !isEmpty();
+}
+
+inline base::Result<int> BpfRingbufBase::ConsumeAll(
+    const std::function<void(const void*)>& callback) {
+  int64_t count = 0;
+  uint32_t prod_pos = mProducerPos->load(std::memory_order_acquire);
+  // Only userspace writes to mConsumerPos, so no need to use std::memory_order_acquire
+  uint64_t cons_pos = mConsumerPos->load(std::memory_order_relaxed);
+  while ((cons_pos & 0xFFFFFFFF) != prod_pos) {
+    // Find the start of the entry for this read (wrapping is done here).
+    void* start_ptr = pointerAddBytes<void*>(mDataPos, cons_pos & mPosMask);
+
+    // The entry has an 8 byte header containing the sample length.
+    // struct bpf_ringbuf_hdr {
+    //   u32 len;
+    //   u32 pg_off;
+    // };
+    uint32_t length = *reinterpret_cast<volatile uint32_t*>(start_ptr);
+
+    // If the sample isn't committed, we're caught up with the producer.
+    if (length & BPF_RINGBUF_BUSY_BIT) return count;
+
+    cons_pos += roundLength(length);
+
+    if ((length & BPF_RINGBUF_DISCARD_BIT) == 0) {
+      if (length != mValueSize) {
+        mConsumerPos->store(cons_pos, std::memory_order_release);
+        errno = EMSGSIZE;
+        return android::base::ErrnoError()
+               << "BPF ring buffer message has unexpected size (want "
+               << mValueSize << " bytes, got " << length << " bytes)";
+      }
+      callback(pointerAddBytes<const void*>(start_ptr, BPF_RINGBUF_HDR_SZ));
+      count++;
+    }
+
+    mConsumerPos->store(cons_pos, std::memory_order_release);
+  }
+
+  return count;
+}
+
+template <typename Value>
+inline base::Result<std::unique_ptr<BpfRingbuf<Value>>>
+BpfRingbuf<Value>::Create(const char* path) {
+  auto rb = std::unique_ptr<BpfRingbuf>(new BpfRingbuf);
+  if (auto status = rb->Init(path); !status.ok()) return status.error();
+  return rb;
+}
+
+template <typename Value>
+inline base::Result<int> BpfRingbuf<Value>::ConsumeAll(
+    const MessageCallback& callback) {
+  return BpfRingbufBase::ConsumeAll([&](const void* value) {
+    callback(*reinterpret_cast<const Value*>(value));
+  });
+}
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/include/bpf/BpfUtils.h b/bpf/headers/include/bpf/BpfUtils.h
new file mode 100644
index 0000000..9dd5822
--- /dev/null
+++ b/bpf/headers/include/bpf/BpfUtils.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <errno.h>
+#include <linux/if_ether.h>
+#include <linux/pfkeyv2.h>
+#include <net/if.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/utsname.h>
+
+#include <log/log.h>
+
+#include "KernelUtils.h"
+
+namespace android {
+namespace bpf {
+
+// See kernel's net/core/sock_diag.c __sock_gen_cookie()
+// the implementation of which guarantees 0 will never be returned,
+// primarily because 0 is used to mean not yet initialized,
+// and socket cookies are only assigned on first fetch.
+constexpr const uint64_t NONEXISTENT_COOKIE = 0;
+
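+// Illustrative use (a sketch): socket cookies are commonly used as keys into
+// per-socket BPF maps, e.g.:
+//   uint64_t cookie = getSocketCookie(sockFd);
+//   if (cookie == NONEXISTENT_COOKIE) return -errno;
+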
+static inline uint64_t getSocketCookie(int sockFd) {
+    uint64_t sock_cookie;
+    socklen_t cookie_len = sizeof(sock_cookie);
+    if (getsockopt(sockFd, SOL_SOCKET, SO_COOKIE, &sock_cookie, &cookie_len)) {
+        // Failure is almost certainly either EBADF or ENOTSOCK
+        const int err = errno;
+        ALOGE("Failed to get socket cookie: %s\n", strerror(err));
+        errno = err;
+        return NONEXISTENT_COOKIE;
+    }
+    if (cookie_len != sizeof(sock_cookie)) {
+        // This probably cannot actually happen, but...
+        ALOGE("Failed to get socket cookie: len %d != 8\n", cookie_len);
+        errno = 523; // EBADCOOKIE: kernel internal, seems reasonable enough...
+        return NONEXISTENT_COOKIE;
+    }
+    return sock_cookie;
+}
+
+static inline int synchronizeKernelRCU() {
+    // This is a temporary hack for network stats map swap on devices running
+    // 4.9 kernels. The kernel code of socket release on pf_key socket will
+    // explicitly call synchronize_rcu() which is exactly what we need.
+    //
+    // Linux 4.14/4.19/5.4/5.10/5.15/6.1 (and 6.3-rc5) still have this same behaviour.
+    // see net/key/af_key.c: pfkey_release() -> synchronize_rcu()
+    // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/key/af_key.c?h=v6.3-rc5#n185
+    const int pfSocket = socket(AF_KEY, SOCK_RAW | SOCK_CLOEXEC, PF_KEY_V2);
+
+    if (pfSocket < 0) {
+        const int err = errno;
+        ALOGE("create PF_KEY socket failed: %s", strerror(err));
+        return -err;
+    }
+
+    // When closing socket, synchronize_rcu() gets called in sock_release().
+    if (close(pfSocket)) {
+        const int err = errno;
+        ALOGE("failed to close the PF_KEY socket: %s", strerror(err));
+        return -err;
+    }
+    return 0;
+}
+
+static inline int setrlimitForTest() {
+    // Set the memory rlimit for the test process if the default MEMLOCK rlimit is not enough.
+    struct rlimit limit = {
+            .rlim_cur = 1073741824,  // 1 GiB
+            .rlim_max = 1073741824,  // 1 GiB
+    };
+    const int res = setrlimit(RLIMIT_MEMLOCK, &limit);
+    if (res) ALOGE("Failed to set the default MEMLOCK rlimit: %s", strerror(errno));
+    return res;
+}
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/include/bpf/KernelUtils.h b/bpf/headers/include/bpf/KernelUtils.h
new file mode 100644
index 0000000..417a5c4
--- /dev/null
+++ b/bpf/headers/include/bpf/KernelUtils.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/personality.h>
+#include <sys/utsname.h>
+
+namespace android {
+namespace bpf {
+
+#define KVER(a, b, c) (((a) << 24) + ((b) << 16) + (c))
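+// e.g. KVER(5, 10, 0) == 0x050A0000, so version checks reduce to integer comparisons.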
+
+static inline unsigned uncachedKernelVersion() {
+    struct utsname buf;
+    if (uname(&buf)) return 0;
+
+    unsigned kver_major = 0;
+    unsigned kver_minor = 0;
+    unsigned kver_sub = 0;
+    (void)sscanf(buf.release, "%u.%u.%u", &kver_major, &kver_minor, &kver_sub);
+    return KVER(kver_major, kver_minor, kver_sub);
+}
+
+static inline unsigned kernelVersion() {
+    static unsigned kver = uncachedKernelVersion();
+    return kver;
+}
+
+static inline bool isAtLeastKernelVersion(unsigned major, unsigned minor, unsigned sub) {
+    return kernelVersion() >= KVER(major, minor, sub);
+}
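+
+// Illustrative usage (hypothetical caller): gate a feature on a minimum
+// kernel version, e.g. BPF ring buffers require Linux 5.8+:
+//   if (isAtLeastKernelVersion(5, 8, 0)) { /* safe to use ring buffers */ }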
+
+static inline bool isKernelVersion(unsigned major, unsigned minor) {
+    return isAtLeastKernelVersion(major, minor, 0) && !isAtLeastKernelVersion(major, minor + 1, 0);
+}
+
+static inline bool __unused isLtsKernel() {
+    return isKernelVersion(4,  4) ||  // minimum for Android R
+           isKernelVersion(4,  9) ||  // minimum for Android S & T
+           isKernelVersion(4, 14) ||  // minimum for Android U
+           isKernelVersion(4, 19) ||  // minimum for Android V
+           isKernelVersion(5,  4) ||  // first supported in Android R
+           isKernelVersion(5, 10) ||  // first supported in Android S
+           isKernelVersion(5, 15) ||  // first supported in Android T
+           isKernelVersion(6,  1) ||  // first supported in Android U
+           isKernelVersion(6,  6);    // first supported in Android V
+}
+
+// Figure out the bitness of userspace.
+// Trivial and known at compile time.
+static constexpr bool isUserspace32bit() {
+    return sizeof(void*) == 4;
+}
+
+static constexpr bool isUserspace64bit() {
+    return sizeof(void*) == 8;
+}
+
+#if defined(__LP64__)
+static_assert(isUserspace64bit(), "huh? LP64 must have 64-bit userspace");
+#elif defined(__ILP32__)
+static_assert(isUserspace32bit(), "huh? ILP32 must have 32-bit userspace");
+#else
+#error "huh? must be either LP64 (64-bit userspace) or ILP32 (32-bit userspace)"
+#endif
+
+static_assert(isUserspace32bit() || isUserspace64bit(), "must be either 32 or 64 bit");
+
+// Figure out the bitness of the kernel.
+static inline bool isKernel64Bit() {
+    // a 64-bit userspace requires a 64-bit kernel
+    if (isUserspace64bit()) return true;
+
+    static bool init = false;
+    static bool cache = false;
+    if (init) return cache;
+
+    // Retrieve current personality - on Linux this system call *cannot* fail.
+    int p = personality(0xffffffff);
+    // But if it somehow does, just assume kernel and userspace (which is 32-bit) match...
+    if (p == -1) return false;
+
+    // This will effectively mask out the bottom 8 bits, and switch to 'native'
+    // personality, and then return the previous personality of this thread
+    // (likely PER_LINUX or PER_LINUX32) with any extra options unmodified.
+    int q = personality((p & ~PER_MASK) | PER_LINUX);
+    // Per man page this theoretically could error out with EINVAL,
+    // but kernel code analysis suggests setting PER_LINUX cannot fail.
+    // Either way, assume kernel and userspace (which is 32-bit) match...
+    if (q != p) return false;
+
+    struct utsname u;
+    (void)uname(&u);  // only possible failure is EFAULT, but u is on stack.
+
+    // Switch back to previous personality.
+    // Theoretically could fail with EINVAL on arm64 with no 32-bit support,
+    // but then we wouldn't have fetched 'p' from the kernel in the first place.
+    // Either way there's nothing meaningful we can do in case of error.
+    // Since PER_LINUX32 vs PER_LINUX only affects uname.machine it doesn't
+    // really hurt us either.  We're really just switching back to be 'clean'.
+    (void)personality(p);
+
+    // Possible values of utsname.machine observed on x86_64 desktop (arm via qemu):
+    //   x86_64 i686 aarch64 armv7l
+    // additionally observed on arm device:
+    //   armv8l
+    // presumably also might just be possible:
+    //   i386 i486 i586
+    // and there might be other weird arm32 cases.
+    // We note that '64' is present in both 64-bit archs,
+    // and in general is likely to be present only in 64-bit archs.
+    cache = !!strstr(u.machine, "64");
+    init = true;
+    return cache;
+}
+
+static inline __unused bool isKernel32Bit() {
+    return !isKernel64Bit();
+}
+
+static constexpr bool isArm() {
+#if defined(__arm__)
+    static_assert(isUserspace32bit(), "huh? arm must be 32 bit");
+    return true;
+#elif defined(__aarch64__)
+    static_assert(isUserspace64bit(), "aarch64 must be LP64 - no support for ILP32");
+    return true;
+#else
+    return false;
+#endif
+}
+
+static constexpr bool isX86() {
+#if defined(__i386__)
+    static_assert(isUserspace32bit(), "huh? i386 must be 32 bit");
+    return true;
+#elif defined(__x86_64__)
+    static_assert(isUserspace64bit(), "x86_64 must be LP64 - no support for ILP32 (x32)");
+    return true;
+#else
+    return false;
+#endif
+}
+
+static constexpr bool isRiscV() {
+#if defined(__riscv)
+    static_assert(isUserspace64bit(), "riscv must be 64 bit");
+    return true;
+#else
+    return false;
+#endif
+}
+
+static_assert(isArm() || isX86() || isRiscV(), "Unknown architecture");
+
+static __unused const char * describeArch() {
+    // ordered so as to make it easier to compile-time optimize;
+    // the only thing not known at compile time is isKernel64Bit()
+    if (isUserspace64bit()) {
+        if (isArm()) return "64-on-aarch64";
+        if (isX86()) return "64-on-x86-64";
+        if (isRiscV()) return "64-on-riscv64";
+    } else if (isKernel64Bit()) {
+        if (isArm()) return "32-on-aarch64";
+        if (isX86()) return "32-on-x86-64";
+    } else {
+        if (isArm()) return "32-on-arm32";
+        if (isX86()) return "32-on-x86-32";
+    }
+    return "unknown";  // unreachable: the static_assert above guarantees a match
+}
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/include/bpf/WaitForProgsLoaded.h b/bpf/headers/include/bpf/WaitForProgsLoaded.h
new file mode 100644
index 0000000..bc4168e
--- /dev/null
+++ b/bpf/headers/include/bpf/WaitForProgsLoaded.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ * Android BPF library - public API
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <log/log.h>
+
+#include <android-base/properties.h>
+
+namespace android {
+namespace bpf {
+
+// Wait for bpfloader to load BPF programs.
+static inline void waitForProgsLoaded() {
+    // infinite loop until success with 5/10/20/40/60/60/60... delay
+    for (int delay = 5;; delay *= 2) {
+        if (delay > 60) delay = 60;
+        if (android::base::WaitForProperty("bpf.progs_loaded", "1", std::chrono::seconds(delay)))
+            return;
+        ALOGW("Waited %ds for bpf.progs_loaded, still waiting...", delay);
+    }
+}
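+
+// Hypothetical caller sketch (for illustration only): a daemon that depends
+// on bpf programs would typically call this early in main(), before touching
+// anything pinned under /sys/fs/bpf:
+//   int main() {
+//       android::bpf::waitForProgsLoaded();
+//       // ... now safe to retrieve pinned maps & programs ...
+//   }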
+
+}  // namespace bpf
+}  // namespace android
diff --git a/bpf/headers/include/bpf_helpers.h b/bpf/headers/include/bpf_helpers.h
new file mode 100644
index 0000000..c94f1d8
--- /dev/null
+++ b/bpf/headers/include/bpf_helpers.h
@@ -0,0 +1,453 @@
+/* Common BPF helpers to be used by all BPF programs loaded by Android */
+
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "bpf_map_def.h"
+
+/******************************************************************************
+ * WARNING: CHANGES TO THIS FILE OUTSIDE OF AOSP/MAIN ARE LIKELY TO BREAK     *
+ * DEVICE COMPATIBILITY WITH MAINLINE MODULES SHIPPING EBPF CODE.             *
+ *                                                                            *
+ * THIS WILL LIKELY RESULT IN BRICKED DEVICES AT SOME ARBITRARY FUTURE TIME   *
+ *                                                                            *
+ * THAT GOES ESPECIALLY FOR THE 'SECTION' 'LICENSE' AND 'CRITICAL' MACROS     *
+ *                                                                            *
+ * We strongly suggest that if you need changes to bpfloader functionality    *
+ * you get your changes reviewed and accepted into aosp/main.                 *
+ *                                                                            *
+ ******************************************************************************/
+
+// The actual versions of the bpfloader that shipped in various Android releases
+
+// Android P/Q/R: BpfLoader was initially part of netd,
+// this was later split out into a standalone binary, but was unversioned.
+
+// Android S / 12 (api level 31) - added 'tethering' mainline eBPF support
+#define BPFLOADER_S_VERSION 2u
+
+// Android T / 13 (api level 33) - support for shared/selinux_context/pindir
+#define BPFLOADER_T_VERSION 19u
+
+// BpfLoader v0.25+ supports obj@ver.o files
+#define BPFLOADER_OBJ_AT_VER_VERSION 25u
+
+// BpfLoader v0.33+ supports {map,prog}.ignore_on_{eng,user,userdebug}
+#define BPFLOADER_IGNORED_ON_VERSION 33u
+
+// Android U / 14 (api level 34) - various new program types added
+#define BPFLOADER_U_VERSION 38u
+
+// Android U QPR2 / 14 (api level 34) - platform only
+// (note: the platform bpfloader in V isn't really versioned at all,
+//  as there is no need as it can only load objects compiled at the
+//  same time as itself and the rest of the platform)
+#define BPFLOADER_U_QPR2_VERSION 41u
+#define BPFLOADER_PLATFORM_VERSION BPFLOADER_U_QPR2_VERSION
+
+// Android Mainline - this bpfloader should eventually go back to T (or even S)
+// Note: this value (and the following +1u's) are hardcoded in NetBpfLoad.cpp
+#define BPFLOADER_MAINLINE_VERSION 42u
+
+// Android Mainline BpfLoader when running on Android T
+#define BPFLOADER_MAINLINE_T_VERSION (BPFLOADER_MAINLINE_VERSION + 1u)
+
+// Android Mainline BpfLoader when running on Android U
+#define BPFLOADER_MAINLINE_U_VERSION (BPFLOADER_MAINLINE_T_VERSION + 1u)
+
+// Android Mainline BpfLoader when running on Android U QPR3
+#define BPFLOADER_MAINLINE_U_QPR3_VERSION (BPFLOADER_MAINLINE_U_VERSION + 1u)
+
+// Android Mainline BpfLoader when running on Android V
+#define BPFLOADER_MAINLINE_V_VERSION (BPFLOADER_MAINLINE_U_QPR3_VERSION + 1u)
+
+/* For mainline module use, you can #define BPFLOADER_{MIN/MAX}_VER
+ * before #include "bpf_helpers.h" to change which bpfloaders will
+ * process the resulting .o file.
+ *
+ * While this will work outside of mainline too, there is no point in using
+ * it when the .o and the bpfloader ship in sync with each other, in which
+ * case it is best to simply use the default.
+ */
+#ifndef BPFLOADER_MIN_VER
+#define BPFLOADER_MIN_VER BPFLOADER_PLATFORM_VERSION  // inclusive, ie. >=
+#endif
+
+#ifndef BPFLOADER_MAX_VER
+#define BPFLOADER_MAX_VER 0x10000u  // exclusive, ie. < v1.0
+#endif
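+
+/* For example, a hypothetical mainline-shipped program that must also be
+ * loadable by the mainline bpfloader running on Android T could do:
+ *
+ *   #define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION
+ *   #include "bpf_helpers.h"
+ *
+ * (a sketch only - the correct minimum depends on which loader features
+ *  the program actually relies on)
+ */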
+
+/* place things in different elf sections */
+#define SECTION(NAME) __attribute__((section(NAME), used))
+
+/* Must be present in every program, example usage:
+ *   LICENSE("GPL"); or LICENSE("Apache 2.0");
+ *
+ * We also take this opportunity to embed a bunch of other useful values in
+ * the resulting .o (This is to enable some limited forward compatibility
+ * with mainline module shipped ebpf programs)
+ *
+ * The bpfloader_{min/max}_ver defines the [min, max) range of bpfloader
+ * versions that should load this .o file (bpfloaders outside of this range
+ * will simply ignore/skip this *entire* .o)
+ * The [inclusive,exclusive) matches what we do for kernel ver dependencies.
+ *
+ * The size_of_bpf_{map,prog}_def values allow the bpfloader to load programs
+ * where these structures have been extended with additional fields (which
+ * will then of course simply be ignored).
+ *
+ * If missing, bpfloader_{min/max}_ver default to 0/0x10000 ie. [v0.0, v1.0),
+ * while size_of_bpf_{map/prog}_def default to 32/20 which are the v0.0 sizes.
+ *
+ * This macro also disables loading BTF map debug information, as versions
+ * of the platform bpfloader that support BTF require fork-exec of btfloader
+ * which causes a regression in boot time.
+ */
+#define LICENSE(NAME)                                                                              \
+    unsigned int _bpfloader_min_ver SECTION("bpfloader_min_ver") = BPFLOADER_MIN_VER;              \
+    unsigned int _bpfloader_max_ver SECTION("bpfloader_max_ver") = BPFLOADER_MAX_VER;              \
+    size_t _size_of_bpf_map_def SECTION("size_of_bpf_map_def") = sizeof(struct bpf_map_def);       \
+    size_t _size_of_bpf_prog_def SECTION("size_of_bpf_prog_def") = sizeof(struct bpf_prog_def);    \
+    unsigned _btf_min_bpfloader_ver SECTION("btf_min_bpfloader_ver") = BPFLOADER_MAINLINE_VERSION; \
+    unsigned _btf_user_min_bpfloader_ver SECTION("btf_user_min_bpfloader_ver") = 0xFFFFFFFFu;      \
+    char _license[] SECTION("license") = (NAME)
+
+/* flag the resulting bpf .o file as critical to system functionality,
+ * loading all kernel version appropriate programs in it must succeed
+ * for bpfloader success
+ */
+#define CRITICAL(REASON) char _critical[] SECTION("critical") = (REASON)
+
+/*
+ * Helper functions called from eBPF programs written in C. These are
+ * implemented in the kernel sources.
+ */
+
+struct kver_uint { unsigned int kver; };
+#define KVER_(v) ((struct kver_uint){ .kver = (v) })
+#define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
+#define KVER_NONE KVER_(0)
+#define KVER_4_14 KVER(4, 14, 0)
+#define KVER_4_19 KVER(4, 19, 0)
+#define KVER_5_4  KVER(5, 4, 0)
+#define KVER_5_8  KVER(5, 8, 0)
+#define KVER_5_9  KVER(5, 9, 0)
+#define KVER_5_10 KVER(5, 10, 0)
+#define KVER_5_15 KVER(5, 15, 0)
+#define KVER_6_1  KVER(6, 1, 0)
+#define KVER_6_6  KVER(6, 6, 0)
+#define KVER_INF KVER_(0xFFFFFFFFu)
+
+#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
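+
+// For illustration: KVER_IS_AT_LEAST(KVER_5_10, 5, 8, 0) is true at compile
+// time, so code can (hypothetically) gate helper usage on a program's
+// declared minimum kernel version.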
+
+/*
+ * BPFFS (ie. /sys/fs/bpf) labelling is as follows:
+ *   subdirectory   selinux context      mainline  usecase / usable by
+ *   /              fs_bpf               no [*]    core operating system (ie. platform)
+ *   /loader        fs_bpf_loader        no, U+    (as yet unused)
+ *   /net_private   fs_bpf_net_private   yes, T+   network_stack
+ *   /net_shared    fs_bpf_net_shared    yes, T+   network_stack & system_server
+ *   /netd_readonly fs_bpf_netd_readonly yes, T+   network_stack & system_server & r/o to netd
+ *   /netd_shared   fs_bpf_netd_shared   yes, T+   network_stack & system_server & netd [**]
+ *   /tethering     fs_bpf_tethering     yes, S+   network_stack
+ *   /vendor        fs_bpf_vendor        no, T+    vendor
+ *
+ * [*] initial support for bpf was added back in P,
+ *     but things worked differently back then: there was no bpfloader,
+ *     and netd did everything by hand instead.
+ *     A bpfloader with pinning into /sys/fs/bpf was (I believe) added in Q
+ *     (and was definitely there in R).
+ *
+ * [**] additionally bpf programs are accessible to netutils_wrapper
+ *      for use by iptables xt_bpf extensions.
+ *
+ * See cs/p:aosp-master%20-file:prebuilts/%20file:genfs_contexts%20"genfscon%20bpf"
+ */
+
+/* generic functions */
+
+/*
+ * Type-unsafe bpf map functions - avoid if possible.
+ *
+ * Using these it is possible to pass in keys/values of the wrong type/size,
+ * or, for 'bpf_map_lookup_elem_unsafe', receive into a pointer of the wrong type.
+ * You will not get a compile time failure, and for certain classes of errors you
+ * might not even get a failure from the kernel's eBPF verifier during program load;
+ * instead things may simply not work correctly at runtime.
+ *
+ * Instead please use:
+ *   DEFINE_BPF_MAP(foo_map, TYPE, KeyType, ValueType, num_entries)
+ * where TYPE can be something like HASH or ARRAY, and num_entries is an integer.
+ *
+ * This defines the map (hence this should not be used in a header file included
+ * from multiple locations) and provides type safe accessors:
+ *   ValueType * bpf_foo_map_lookup_elem(const KeyType *)
+ *   int bpf_foo_map_update_elem(const KeyType *, const ValueType *, flags)
+ *   int bpf_foo_map_delete_elem(const KeyType *)
+ *
+ * This will make sure that if you change the type of a map you'll get compile
+ * errors at any spots you forget to update with the new type.
+ *
+ * Note: these all take pointers to const map because, from the C/eBPF point of
+ * view, the map struct is really just a readonly definition of the in-kernel
+ * object. Runtime modification of the map-defining struct is meaningless, since
+ * its contents are only ever used during bpf program loading & map creation
+ * by the bpf loader, and not by the eBPF program itself.
+ */
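+/* Illustrative sketch (hypothetical map name), using the DEFINE_BPF_MAP macro
+ * defined further below:
+ *
+ *   DEFINE_BPF_MAP(flow_count_map, HASH, uint32_t, uint64_t, 64)
+ *
+ * and then, inside a bpf program:
+ *
+ *   uint32_t key = ...;
+ *   uint64_t* count = bpf_flow_count_map_lookup_elem(&key);
+ *   if (count) __sync_fetch_and_add(count, 1);
+ */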
+static void* (*bpf_map_lookup_elem_unsafe)(const struct bpf_map_def* map,
+                                           const void* key) = (void*)BPF_FUNC_map_lookup_elem;
+static int (*bpf_map_update_elem_unsafe)(const struct bpf_map_def* map, const void* key,
+                                         const void* value, unsigned long long flags) = (void*)
+        BPF_FUNC_map_update_elem;
+static int (*bpf_map_delete_elem_unsafe)(const struct bpf_map_def* map,
+                                         const void* key) = (void*)BPF_FUNC_map_delete_elem;
+static int (*bpf_ringbuf_output_unsafe)(const struct bpf_map_def* ringbuf,
+                                        const void* data, __u64 size, __u64 flags) = (void*)
+        BPF_FUNC_ringbuf_output;
+static void* (*bpf_ringbuf_reserve_unsafe)(const struct bpf_map_def* ringbuf,
+                                           __u64 size, __u64 flags) = (void*)
+        BPF_FUNC_ringbuf_reserve;
+static void (*bpf_ringbuf_submit_unsafe)(const void* data, __u64 flags) = (void*)
+        BPF_FUNC_ringbuf_submit;
+
+#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)  \
+        struct ____btf_map_##name {                     \
+                type_key key;                           \
+                type_val value;                         \
+        };                                              \
+        struct ____btf_map_##name                       \
+        __attribute__ ((section(".maps." #name), used)) \
+                ____btf_map_##name = { }
+
+#define BPF_ASSERT_LOADER_VERSION(min_loader, ignore_eng, ignore_user, ignore_userdebug) \
+    _Static_assert(                                                                      \
+        (min_loader) >= BPFLOADER_IGNORED_ON_VERSION ||                                  \
+            !((ignore_eng).ignore_on_eng ||                                              \
+              (ignore_user).ignore_on_user ||                                            \
+              (ignore_userdebug).ignore_on_userdebug),                                   \
+        "bpfloader min version must be >= 0.33 in order to use ignored_on");
+
+#define DEFINE_BPF_MAP_BASE(the_map, TYPE, keysize, valuesize, num_entries, \
+                            usr, grp, md, selinux, pindir, share, minkver,  \
+                            maxkver, minloader, maxloader, ignore_eng,      \
+                            ignore_user, ignore_userdebug)                  \
+    const struct bpf_map_def SECTION("maps") the_map = {                    \
+        .type = BPF_MAP_TYPE_##TYPE,                                        \
+        .key_size = (keysize),                                              \
+        .value_size = (valuesize),                                          \
+        .max_entries = (num_entries),                                       \
+        .map_flags = 0,                                                     \
+        .uid = (usr),                                                       \
+        .gid = (grp),                                                       \
+        .mode = (md),                                                       \
+        .bpfloader_min_ver = (minloader),                                   \
+        .bpfloader_max_ver = (maxloader),                                   \
+        .min_kver = (minkver).kver,                                         \
+        .max_kver = (maxkver).kver,                                         \
+        .selinux_context = (selinux),                                       \
+        .pin_subdir = (pindir),                                             \
+        .shared = (share).shared,                                           \
+        .ignore_on_eng = (ignore_eng).ignore_on_eng,                        \
+        .ignore_on_user = (ignore_user).ignore_on_user,                     \
+        .ignore_on_userdebug = (ignore_userdebug).ignore_on_userdebug,      \
+    };                                                                      \
+    BPF_ASSERT_LOADER_VERSION(minloader, ignore_eng, ignore_user, ignore_userdebug);
+
+// Type safe macro to declare a ring buffer and related output functions.
+// Compatibility:
+// * BPF ring buffers are only available on kernels 5.8 and above. Any program
+//   accessing the ring buffer should set a program-level min_kver >= 5.8.
+// * The definition below sets a map min_kver of 5.8 which requires targeting
+//   a BPFLOADER_MIN_VER >= BPFLOADER_S_VERSION.
+#define DEFINE_BPF_RINGBUF_EXT(the_map, ValueType, size_bytes, usr, grp, md,   \
+                               selinux, pindir, share, min_loader, max_loader, \
+                               ignore_eng, ignore_user, ignore_userdebug)      \
+    DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md,      \
+                        selinux, pindir, share, KVER_5_8, KVER_INF,            \
+                        min_loader, max_loader, ignore_eng, ignore_user,       \
+                        ignore_userdebug);                                     \
+                                                                               \
+    _Static_assert((size_bytes) >= 4096, "min 4 kiB ringbuffer size");         \
+    _Static_assert((size_bytes) <= 0x10000000, "max 256 MiB ringbuffer size"); \
+    _Static_assert(((size_bytes) & ((size_bytes) - 1)) == 0,                   \
+                   "ring buffer size must be a power of two");                 \
+                                                                               \
+    static inline __always_inline __unused int bpf_##the_map##_output(         \
+            const ValueType* v) {                                              \
+        return bpf_ringbuf_output_unsafe(&the_map, v, sizeof(*v), 0);          \
+    }                                                                          \
+                                                                               \
+    static inline __always_inline __unused                                     \
+            ValueType* bpf_##the_map##_reserve() {                             \
+        return bpf_ringbuf_reserve_unsafe(&the_map, sizeof(ValueType), 0);     \
+    }                                                                          \
+                                                                               \
+    static inline __always_inline __unused void bpf_##the_map##_submit(        \
+            const ValueType* v) {                                              \
+        bpf_ringbuf_submit_unsafe(v, 0);                                       \
+    }
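+
+// Illustrative usage (hypothetical names): with the macro above one could write
+//   DEFINE_BPF_RINGBUF_EXT(events_ringbuf, struct event, 4096, AID_ROOT,
+//                          AID_ROOT, 0660, "", "", PRIVATE, BPFLOADER_MIN_VER,
+//                          BPFLOADER_MAX_VER, LOAD_ON_ENG, LOAD_ON_USER,
+//                          LOAD_ON_USERDEBUG)
+// and then either copy a record out:
+//   bpf_events_ringbuf_output(&ev);
+// or reserve/submit for zero-copy writes:
+//   struct event* e = bpf_events_ringbuf_reserve();
+//   if (e) { *e = ev; bpf_events_ringbuf_submit(e); }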
+
+/* There exist buggy kernels on pre-T OSes that, due to the
+ * kernel patch "[ALPS05162612] bpf: fix ubsan error",
+ * do not support userspace writes into non-zero indices of bpf array maps.
+ *
+ * We use this assert to prevent ourselves from defining such a map.
+ */
+
+#ifdef THIS_BPF_PROGRAM_IS_FOR_TEST_PURPOSES_ONLY
+#define BPF_MAP_ASSERT_OK(type, entries, mode)
+#elif BPFLOADER_MIN_VER >= BPFLOADER_T_VERSION
+#define BPF_MAP_ASSERT_OK(type, entries, mode)
+#else
+#define BPF_MAP_ASSERT_OK(type, entries, mode) \
+  _Static_assert(((type) != BPF_MAP_TYPE_ARRAY) || ((entries) <= 1) || !((mode) & 0222), \
+  "Writable arrays with more than 1 element not supported on pre-T devices.")
+#endif
+
+/* type safe macro to declare a map and related accessor functions */
+#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md,         \
+                           selinux, pindir, share, min_loader, max_loader, ignore_eng,           \
+                           ignore_user, ignore_userdebug)                                        \
+  DEFINE_BPF_MAP_BASE(the_map, TYPE, sizeof(KeyType), sizeof(ValueType),                         \
+                      num_entries, usr, grp, md, selinux, pindir, share,                         \
+                      KVER_NONE, KVER_INF, min_loader, max_loader,                               \
+                      ignore_eng, ignore_user, ignore_userdebug);                                \
+    BPF_MAP_ASSERT_OK(BPF_MAP_TYPE_##TYPE, (num_entries), (md));                                 \
+    _Static_assert(sizeof(KeyType) < 1024, "aosp/2370288 requires < 1024 byte keys");            \
+    _Static_assert(sizeof(ValueType) < 65536, "aosp/2370288 requires < 65536 byte values");      \
+    BPF_ANNOTATE_KV_PAIR(the_map, KeyType, ValueType);                                           \
+                                                                                                 \
+    static inline __always_inline __unused ValueType* bpf_##the_map##_lookup_elem(               \
+            const KeyType* k) {                                                                  \
+        return bpf_map_lookup_elem_unsafe(&the_map, k);                                          \
+    };                                                                                           \
+                                                                                                 \
+    static inline __always_inline __unused int bpf_##the_map##_update_elem(                      \
+            const KeyType* k, const ValueType* v, unsigned long long flags) {                    \
+        return bpf_map_update_elem_unsafe(&the_map, k, v, flags);                                \
+    };                                                                                           \
+                                                                                                 \
+    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const KeyType* k) {   \
+        return bpf_map_delete_elem_unsafe(&the_map, k);                                          \
+    };
+
+#ifndef DEFAULT_BPF_MAP_SELINUX_CONTEXT
+#define DEFAULT_BPF_MAP_SELINUX_CONTEXT ""
+#endif
+
+#ifndef DEFAULT_BPF_MAP_PIN_SUBDIR
+#define DEFAULT_BPF_MAP_PIN_SUBDIR ""
+#endif
+
+#ifndef DEFAULT_BPF_MAP_UID
+#define DEFAULT_BPF_MAP_UID AID_ROOT
+#elif BPFLOADER_MIN_VER < 28u
+#error "Bpf Map UID must be left at default of AID_ROOT for BpfLoader prior to v0.28"
+#endif
+
+#define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md)     \
+    DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md,         \
+                       DEFAULT_BPF_MAP_SELINUX_CONTEXT, DEFAULT_BPF_MAP_PIN_SUBDIR, PRIVATE, \
+                       BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, LOAD_ON_ENG,                    \
+                       LOAD_ON_USER, LOAD_ON_USERDEBUG)
+
+#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
+                       DEFAULT_BPF_MAP_UID, AID_ROOT, 0600)
+
+#define DEFINE_BPF_MAP_RO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
+                       DEFAULT_BPF_MAP_UID, gid, 0440)
+
+#define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
+                       DEFAULT_BPF_MAP_UID, gid, 0620)
+
+#define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
+                       DEFAULT_BPF_MAP_UID, gid, 0640)
+
+#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
+                       DEFAULT_BPF_MAP_UID, gid, 0660)
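+
+// e.g. a hypothetical stats map shared read-write with the AID_NET_BW_ACCT group:
+//   DEFINE_BPF_MAP_GRW(stats_map, HASH, StatsKey, StatsValue, 1024, AID_NET_BW_ACCT)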
+
+// LLVM eBPF builtins: they directly generate BPF_LD_ABS/BPF_LD_IND (skb may be ignored?)
+unsigned long long load_byte(void* skb, unsigned long long off) asm("llvm.bpf.load.byte");
+unsigned long long load_half(void* skb, unsigned long long off) asm("llvm.bpf.load.half");
+unsigned long long load_word(void* skb, unsigned long long off) asm("llvm.bpf.load.word");
+
+static int (*bpf_probe_read)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read;
+static int (*bpf_probe_read_str)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str;
+static int (*bpf_probe_read_user)(void* dst, int size, const void* unsafe_ptr) = (void*)BPF_FUNC_probe_read_user;
+static int (*bpf_probe_read_user_str)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_user_str;
+static unsigned long long (*bpf_ktime_get_ns)(void) = (void*) BPF_FUNC_ktime_get_ns;
+static unsigned long long (*bpf_ktime_get_boot_ns)(void) = (void*)BPF_FUNC_ktime_get_boot_ns;
+static int (*bpf_trace_printk)(const char* fmt, int fmt_size, ...) = (void*) BPF_FUNC_trace_printk;
+static unsigned long long (*bpf_get_current_pid_tgid)(void) = (void*) BPF_FUNC_get_current_pid_tgid;
+static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid;
+static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id;
+static long (*bpf_get_stackid)(void* ctx, void* map, uint64_t flags) = (void*) BPF_FUNC_get_stackid;
+static long (*bpf_get_current_comm)(void* buf, uint32_t buf_size) = (void*) BPF_FUNC_get_current_comm;
+
+#define DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv,  \
+                            min_loader, max_loader, opt, selinux, pindir, ignore_eng,    \
+                            ignore_user, ignore_userdebug)                               \
+    const struct bpf_prog_def SECTION("progs") the_prog##_def = {                        \
+        .uid = (prog_uid),                                                               \
+        .gid = (prog_gid),                                                               \
+        .min_kver = (min_kv).kver,                                                       \
+        .max_kver = (max_kv).kver,                                                       \
+        .optional = (opt).optional,                                                      \
+        .bpfloader_min_ver = (min_loader),                                               \
+        .bpfloader_max_ver = (max_loader),                                               \
+        .selinux_context = (selinux),                                                    \
+        .pin_subdir = (pindir),                                                          \
+        .ignore_on_eng = (ignore_eng).ignore_on_eng,                                     \
+        .ignore_on_user = (ignore_user).ignore_on_user,                                  \
+        .ignore_on_userdebug = (ignore_userdebug).ignore_on_userdebug,                   \
+    };                                                                                   \
+    SECTION(SECTION_NAME)                                                                \
+    int the_prog
+
+#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
+                                       opt)                                                        \
+    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv,                \
+                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, opt, "", "",                         \
+                        LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
+
+// Programs (here used in the sense of functions/sections) marked optional are allowed to fail
+// to load (for example due to missing kernel patches).
+// The bpfloader will just ignore these failures and continue processing the next section.
+//
+// A non-optional program (function/section) failing to load causes a failure and aborts
+// processing of the entire .o.  If the .o is additionally marked critical, this will result
+// in the entire bpfloader process terminating with a failure and not setting the
+// bpf.progs_loaded system property.  This in turn results in waitForProgsLoaded() never
+// finishing.
+//
+// ie. a non-optional program in a critical .o is mandatory for kernels matching the min/max kver.
+
+// programs requiring a kernel version >= min_kv && < max_kv
+#define DEFINE_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv) \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
+                                   MANDATORY)
+#define DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, \
+                                            max_kv)                                             \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
+                                   OPTIONAL)
+
+// programs requiring a kernel version >= min_kv
+#define DEFINE_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv)                 \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
+                                   MANDATORY)
+#define DEFINE_OPTIONAL_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv)        \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
+                                   OPTIONAL)
+
+// programs with no kernel version requirements
+#define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
+                                   MANDATORY)
+#define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
+                                   OPTIONAL)
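+
+// Putting it all together - a minimal, purely illustrative .c program
+// (hypothetical names; a real program additionally picks an appropriate
+// uid/gid, section name, and kernel version constraints):
+//
+//   #include <bpf_helpers.h>
+//
+//   DEFINE_BPF_PROG("skfilter/example", AID_ROOT, AID_ROOT, example_prog)
+//   (struct __sk_buff* skb) {
+//       return skb->len;  // accept the whole packet
+//   }
+//
+//   LICENSE("Apache 2.0");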
diff --git a/bpf/headers/include/bpf_map_def.h b/bpf/headers/include/bpf_map_def.h
new file mode 100644
index 0000000..2d6736c
--- /dev/null
+++ b/bpf/headers/include/bpf_map_def.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/* This file is separate because it's included both by eBPF programs (via include
+ * in bpf_helpers.h) and directly by the boot time bpfloader (Loader.cpp).
+ */
+
+#include <linux/bpf.h>
+
+// Pull in AID_* constants from //system/core/libcutils/include/private/android_filesystem_config.h
+#include <cutils/android_filesystem_config.h>
+
+/******************************************************************************
+ *                                                                            *
+ *                          ! ! ! W A R N I N G ! ! !                         *
+ *                                                                            *
+ * CHANGES TO THESE STRUCTURE DEFINITIONS OUTSIDE OF AOSP/MAIN *WILL* BREAK   *
+ * MAINLINE MODULE COMPATIBILITY                                              *
+ *                                                                            *
+ * AND THUS MAY RESULT IN YOUR DEVICE BRICKING AT SOME ARBITRARY POINT IN     *
+ * THE FUTURE                                                                 *
+ *                                                                            *
+ * (and even in aosp/master you may only append new fields at the very end,   *
+ *  you may *never* delete fields, change their types, ordering, insert in    *
+ *  the middle, etc.  If a mainline module using the old definition has       *
+ *  already shipped (which happens roughly monthly), then it's set in stone)  *
+ *                                                                            *
+ ******************************************************************************/
+
+/*
+ * The bpf_{map,prog}_def structures are compiled for different architectures.
+ * Once by the BPF compiler for the BPF architecture, and once by a C++
+ * compiler for the native Android architecture for the bpfloader.
+ *
+ * For things to work, their layout must be the same between the two.
+ * The BPF architecture is platform independent ('64-bit LSB bpf').
+ * So this effectively means these structures must be the same layout
+ * on 5 architectures, all of them little endian:
+ *   64-bit BPF, x86_64 and aarch64,  and  32-bit x86 and arm
+ *
+ * As such for any types we use inside of these structs we must make sure that
+ * the size and alignment are the same, so the same amount of padding is used.
+ *
+ * Currently we only use: bool, enum bpf_map_type and unsigned int.
+ * Additionally we use char for padding.
+ *
+ * !!! WARNING: HERE BE DRAGONS !!!
+ *
+ * Be particularly careful with 64-bit integers.
+ * You will need to manually override their alignment to 8 bytes.
+ *
+ * To quote some parts of https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69560
+ *
+ * Some types have weaker alignment requirements when they are structure members.
+ *
+ * unsigned long long on x86 is such a type.
+ *
+ * C distinguishes C11 _Alignof (the minimum alignment the type is guaranteed
+ * to have in all contexts, so 4, see min_align_of_type) from GNU C __alignof
+ * (the normal alignment of the type, so 8).
+ *
+ * alignof / _Alignof == minimum alignment required by target ABI
+ * __alignof / __alignof__ == preferred alignment
+ *
+ * When in a struct, apparently the minimum alignment is used.
+ */
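+
+// Purely illustrative: if a 64-bit field were ever added to these structs, it
+// would need an explicit alignment override to keep the layout identical on
+// 32-bit x86, along the lines of (hypothetical):
+//   unsigned long long some_future_field __attribute__((aligned(8)));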
+
+_Static_assert(sizeof(bool) == 1, "sizeof bool != 1");
+_Static_assert(__alignof__(bool) == 1, "__alignof__ bool != 1");
+_Static_assert(_Alignof(bool) == 1, "_Alignof bool != 1");
+
+_Static_assert(sizeof(char) == 1, "sizeof char != 1");
+_Static_assert(__alignof__(char) == 1, "__alignof__ char != 1");
+_Static_assert(_Alignof(char) == 1, "_Alignof char != 1");
+
+// This basically verifies that an enum is 'just' a 32-bit int
+_Static_assert(sizeof(enum bpf_map_type) == 4, "sizeof enum bpf_map_type != 4");
+_Static_assert(__alignof__(enum bpf_map_type) == 4, "__alignof__ enum bpf_map_type != 4");
+_Static_assert(_Alignof(enum bpf_map_type) == 4, "_Alignof enum bpf_map_type != 4");
+
+// Linux kernel requires sizeof(int) == 4, sizeof(void*) == sizeof(long), sizeof(long long) == 8
+_Static_assert(sizeof(unsigned int) == 4, "sizeof unsigned int != 4");
+_Static_assert(__alignof__(unsigned int) == 4, "__alignof__ unsigned int != 4");
+_Static_assert(_Alignof(unsigned int) == 4, "_Alignof unsigned int != 4");
+
+// We don't currently use any 64-bit types in these structs, so this is purely to document
+// the issue: here sizeof & __alignof__ are consistent, but _Alignof is not - compile for
+// 'aosp_cf_x86_phone' to see this.
+_Static_assert(sizeof(unsigned long long) == 8, "sizeof unsigned long long != 8");
+_Static_assert(__alignof__(unsigned long long) == 8, "__alignof__ unsigned long long != 8");
+// BPF wants 8, but 32-bit x86 wants 4
+//_Static_assert(_Alignof(unsigned long long) == 8, "_Alignof unsigned long long != 8");
+
+
+// for maps:
+struct shared_bool { bool shared; };
+#define PRIVATE ((struct shared_bool){ .shared = false })
+#define SHARED ((struct shared_bool){ .shared = true })
+
+// for programs:
+struct optional_bool { bool optional; };
+#define MANDATORY ((struct optional_bool){ .optional = false })
+#define OPTIONAL ((struct optional_bool){ .optional = true })
+
+// for both maps and programs:
+struct ignore_on_eng_bool { bool ignore_on_eng; };
+#define LOAD_ON_ENG ((struct ignore_on_eng_bool){ .ignore_on_eng = false })
+#define IGNORE_ON_ENG ((struct ignore_on_eng_bool){ .ignore_on_eng = true })
+
+struct ignore_on_user_bool { bool ignore_on_user; };
+#define LOAD_ON_USER ((struct ignore_on_user_bool){ .ignore_on_user = false })
+#define IGNORE_ON_USER ((struct ignore_on_user_bool){ .ignore_on_user = true })
+
+struct ignore_on_userdebug_bool { bool ignore_on_userdebug; };
+#define LOAD_ON_USERDEBUG ((struct ignore_on_userdebug_bool){ .ignore_on_userdebug = false })
+#define IGNORE_ON_USERDEBUG ((struct ignore_on_userdebug_bool){ .ignore_on_userdebug = true })
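+
+// These one-bool wrapper structs exist purely for type safety: macro arguments
+// must be spelled out as e.g. SHARED or IGNORE_ON_USER, and accidentally passing
+// a bare bool/int (or a flag of the wrong kind) fails to compile, since e.g.
+// (true).shared is not valid C.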
+
+
+// Length of strings (incl. selinux_context and pin_subdir)
+// in the bpf_map_def and bpf_prog_def structs.
+//
+// WARNING: YOU CANNOT *EVER* CHANGE THESE
+// as this would affect the structure size in backwards incompatible ways
+// and break mainline module loading on older Android T devices
+#define BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE 32
+#define BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE 32
+
+/*
+ * Map structure to be used by Android eBPF C programs. The Android eBPF loader
+ * uses this structure from eBPF object to create maps at boot time.
+ *
+ * The eBPF C program should define the structure in the maps section using
+ * SECTION("maps"), otherwise it will be ignored by the eBPF loader.
+ *
+ * For example:
+ *   const struct bpf_map_def SECTION("maps") mymap = { .type = ..., .key_size = ... };
+ *
+ * See 'bpf_helpers.h' for helpful macros for eBPF program use.
+ */
+struct bpf_map_def {
+    enum bpf_map_type type;
+    unsigned int key_size;
+    unsigned int value_size;
+    unsigned int max_entries;
+    unsigned int map_flags;
+
+    // The following are not supported by the Android bpfloader:
+    //   unsigned int inner_map_idx;
+    //   unsigned int numa_node;
+
+    unsigned int zero;  // uid_t, for compat with old (buggy) bpfloader must be AID_ROOT == 0
+    unsigned int gid;   // gid_t
+    unsigned int mode;  // mode_t
+
+    // The following fields were added in version 0.1
+    unsigned int bpfloader_min_ver;  // if missing, defaults to 0, ie. v0.0
+    unsigned int bpfloader_max_ver;  // if missing, defaults to 0x10000, ie. v1.0
+
+    // The following fields were added in version 0.2 (S)
+    // kernelVersion() must be >= min_kver and < max_kver
+    unsigned int min_kver;
+    unsigned int max_kver;
+
+    // The following fields were added in version 0.18 (T)
+    //
+    // These are fixed length strings, padded with null bytes
+    //
+    // Warning: supported values depend on .o location
+    // (additionally a newer Android OS and/or bpfloader may support more values)
+    //
+    // overrides default selinux context (which is based on pin subdir)
+    char selinux_context[BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE];
+    //
+    // overrides default prefix (which is based on .o location)
+    char pin_subdir[BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE];
+
+    bool shared;  // use empty string as 'file' component of pin path - allows cross .o map sharing
+
+    // The following 3 ignore_on_* fields were added in version 0.32 (U). These are ignored in
+    // older bpfloader versions, and zero in programs compiled before 0.32.
+    bool ignore_on_eng:1;
+    bool ignore_on_user:1;
+    bool ignore_on_userdebug:1;
+    // The following 5 ignore_on_* fields were added in version 0.38 (U). These are ignored in
+    // older bpfloader versions, and zero in programs compiled before 0.38.
+    // These are tests on the kernel architecture, ie. they ignore userspace bit-ness.
+    bool ignore_on_arm32:1;
+    bool ignore_on_aarch64:1;
+    bool ignore_on_x86_32:1;
+    bool ignore_on_x86_64:1;
+    bool ignore_on_riscv64:1;
+
+    char pad0[2];  // manually pad up to 4 byte alignment, may be used for extensions in the future
+
+    unsigned int uid;   // uid_t
+};
+
+_Static_assert(sizeof(((struct bpf_map_def *)0)->selinux_context) == 32, "must be 32 bytes");
+_Static_assert(sizeof(((struct bpf_map_def *)0)->pin_subdir) == 32, "must be 32 bytes");
+
+// This needs to be updated whenever the above structure definition is expanded.
+_Static_assert(sizeof(struct bpf_map_def) == 120, "sizeof struct bpf_map_def != 120");
+_Static_assert(__alignof__(struct bpf_map_def) == 4, "__alignof__ struct bpf_map_def != 4");
+_Static_assert(_Alignof(struct bpf_map_def) == 4, "_Alignof struct bpf_map_def != 4");
+
+struct bpf_prog_def {
+    unsigned int uid;
+    unsigned int gid;
+
+    // kernelVersion() must be >= min_kver and < max_kver
+    unsigned int min_kver;
+    unsigned int max_kver;
+
+    bool optional;  // program section (ie. function) may fail to load, continue onto next func.
+
+    // The following 3 ignore_on_* fields were added in version 0.33 (U). These are ignored in
+    // older bpfloader versions, and zero in programs compiled before 0.33.
+    bool ignore_on_eng:1;
+    bool ignore_on_user:1;
+    bool ignore_on_userdebug:1;
+    // The following 5 ignore_on_* fields were added in version 0.38 (U). These are ignored in
+    // older bpfloader versions, and zero in programs compiled before 0.38.
+    // These are tests on the kernel architecture, ie. they ignore userspace bit-ness.
+    bool ignore_on_arm32:1;
+    bool ignore_on_aarch64:1;
+    bool ignore_on_x86_32:1;
+    bool ignore_on_x86_64:1;
+    bool ignore_on_riscv64:1;
+
+    char pad0[2];  // manually pad up to 4 byte alignment, may be used for extensions in the future
+
+    // The following fields were added in version 0.1
+    unsigned int bpfloader_min_ver;  // if missing, defaults to 0, ie. v0.0
+    unsigned int bpfloader_max_ver;  // if missing, defaults to 0x10000, ie. v1.0
+
+    // The following fields were added in version 0.18, see description up above in bpf_map_def
+    char selinux_context[BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE];
+    char pin_subdir[BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE];
+};
+
+_Static_assert(sizeof(((struct bpf_prog_def *)0)->selinux_context) == 32, "must be 32 bytes");
+_Static_assert(sizeof(((struct bpf_prog_def *)0)->pin_subdir) == 32, "must be 32 bytes");
+
+// This needs to be updated whenever the above structure definition is expanded.
+_Static_assert(sizeof(struct bpf_prog_def) == 92, "sizeof struct bpf_prog_def != 92");
+_Static_assert(__alignof__(struct bpf_prog_def) == 4, "__alignof__ struct bpf_prog_def != 4");
+_Static_assert(_Alignof(struct bpf_prog_def) == 4, "_Alignof struct bpf_prog_def != 4");
diff --git a/bpf/loader/NetBpfLoad.cpp b/bpf/loader/NetBpfLoad.cpp
index 00362b4..a10c9e3 100644
--- a/bpf/loader/NetBpfLoad.cpp
+++ b/bpf/loader/NetBpfLoad.cpp
@@ -60,7 +60,7 @@
 
 #include "BpfSyscallWrappers.h"
 #include "bpf/BpfUtils.h"
-#include "bpf/bpf_map_def.h"
+#include "bpf_map_def.h"
 
 using android::base::EndsWith;
 using android::base::StartsWith;