/**
 * Copyright (c) 2022, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <netdutils/Status.h>
#include "bpf/BpfMap.h"
#include "netd.h"

using android::bpf::BpfMap;
using android::bpf::BpfMapRO;

namespace android {
namespace net {

class BpfHandler {
  public:
    BpfHandler();
    BpfHandler(const BpfHandler&) = delete;
    BpfHandler& operator=(const BpfHandler&) = delete;
    netdutils::Status init(const char* cg2_path);
    /*
     * Tag the socket with the specified tag and uid. In the qtaguid module, the
     * first tag request that grabs the spinlock of the rb_tree can update the
     * tag information first, and other requests have to wait until it finishes;
     * all tag requests are handled in the order in which they obtain the
     * spinlock. In the eBPF implementation, the kernel updates the eBPF map
     * entry for the tag request, and the hashmap update is protected by the
     * spinlock initialized with the map. So the behavior of the two modules
     * should be the same, and no additional lock is needed.
     */
    int tagSocket(int sockFd, uint32_t tag, uid_t chargeUid, uid_t realUid);
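    // A minimal usage sketch (hypothetical caller; the cgroup path, tag and uid
    // values are illustrative, and netdutils::isOk() is assumed to be in scope):
    //   BpfHandler handler;
    //   if (netdutils::isOk(handler.init("/sys/fs/cgroup"))) {
    //       handler.tagSocket(sockFd, /*tag=*/0xdeadbeef, /*chargeUid=*/10010,
    //                         /*realUid=*/10010);
    //   }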

    /*
     * The untag process is similar to tagging a socket: both the old qtaguid
     * module and the new eBPF module hold a spinlock inside the kernel for
     * concurrent updates, so no external lock is required.
     */
    int untagSocket(int sockFd);
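    // Untagging later is a single call on the same handler (continuing the
    // hypothetical sketch above):
    //   handler.untagSocket(sockFd);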

  private:
    // For testing
    BpfHandler(uint32_t perUidLimit, uint32_t totalLimit);

    netdutils::Status initMaps();
    bool hasUpdateDeviceStatsPermission(uid_t uid);

    BpfMap<uint64_t, UidTagValue> mCookieTagMap;
    BpfMapRO<StatsKey, StatsValue> mStatsMapA;
    BpfMapRO<StatsKey, StatsValue> mStatsMapB;
    BpfMapRO<uint32_t, uint32_t> mConfigurationMap;
    BpfMapRO<uint32_t, uint8_t> mUidPermissionMap;

    // The limit on the number of stats entries a uid can have in the per uid stats map. BpfHandler
    // will block that specific uid from tagging new sockets after the limit is reached.
    const uint32_t mPerUidStatsEntriesLimit;

    // The limit on the total number of stats entries in the per uid stats map. BpfHandler will
    // block all tagging requests after the limit is reached.
    const uint32_t mTotalUidStatsEntriesLimit;
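    // For illustration only (these numbers are not defaults of this class): with
    // mPerUidStatsEntriesLimit = 500 and mTotalUidStatsEntriesLimit = 5000, a uid
    // that already owns 500 stats entries can no longer tag new sockets, and once
    // the stats map holds 5000 entries in total, tagging is refused for all uids.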

    // For testing
    friend class BpfHandlerTest;
};

}  // namespace net
}  // namespace android