[NFCT.TETHER.6] Migrate tetherOffloadGetAndClearStats from netd to mainline

A preparation for updating the BPF maps from the mainline module:
BpfCoordinator now fetches and clears per-upstream offload stats through
BpfCoordinatorShim instead of calling netd directly. The API 30 shim
keeps the netd call, while the API 31 shim reads and clears the BPF
tether stats and limit maps.

Test: atest TetheringCoverageTests
Change-Id: Id87b88f6dfcdfe5765756442ed880933cd1c6baf
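
For reviewers, an illustrative summary of the caller-side migration (the actual
change is in the BpfCoordinator.java hunk below; this snippet is not part of the
patch itself):

    // Before: BpfCoordinator calls netd directly over binder.
    final TetherStatsParcel stats = mNetd.tetherOffloadGetAndClearStats(upstreamIfindex);

    // After: BpfCoordinator goes through the shim. The API 30 shim still calls netd,
    // while the API 31 shim reads and clears the BPF tether stats and limit maps.
    final TetherStatsValue statsValue =
            mBpfCoordinatorShim.tetherOffloadGetAndClearStats(upstreamIfindex);
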
diff --git a/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java b/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java
index 8ef6d6f..eafa3ea 100644
--- a/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java
+++ b/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java
@@ -117,6 +117,21 @@
     }
 
     @Override
+    @Nullable
+    public TetherStatsValue tetherOffloadGetAndClearStats(int ifIndex) {
+        try {
+            final TetherStatsParcel stats =
+                    mNetd.tetherOffloadGetAndClearStats(ifIndex);
+            return new TetherStatsValue(stats.rxPackets, stats.rxBytes, 0 /* rxErrors */,
+                    stats.txPackets, stats.txBytes, 0 /* txErrors */);
+        } catch (RemoteException | ServiceSpecificException e) {
+            mLog.e("Exception when cleanup tether stats for upstream index "
+                    + ifIndex + ": ", e);
+            return null;
+        }
+    }
+
+    @Override
     public String toString() {
         return "Netd used";
     }
diff --git a/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java b/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java
index f94b5a9..4ebf914 100644
--- a/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java
+++ b/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java
@@ -20,6 +20,7 @@
 
 import android.net.util.SharedLog;
 import android.system.ErrnoException;
+import android.system.Os;
 import android.system.OsConstants;
 import android.util.SparseArray;
 
@@ -36,6 +37,8 @@
 import com.android.networkstack.tethering.TetherStatsKey;
 import com.android.networkstack.tethering.TetherStatsValue;
 
+import java.io.FileDescriptor;
+
 /**
  * Bpf coordinator class for API shims.
  */
@@ -43,6 +46,11 @@
         extends com.android.networkstack.tethering.apishim.common.BpfCoordinatorShim {
     private static final String TAG = "api31.BpfCoordinatorShimImpl";
 
+    // AF_KEY address family. See include/linux/socket.h.
+    private static final int AF_KEY = 15;
+    // PFKEYv2 constants. See include/uapi/linux/pfkeyv2.h.
+    private static final int PF_KEY_V2 = 2;
+
     @NonNull
     private final SharedLog mLog;
 
@@ -178,6 +186,53 @@
     }
 
     @Override
+    @Nullable
+    public TetherStatsValue tetherOffloadGetAndClearStats(int ifIndex) {
+        if (!isInitialized()) return null;
+
+        // tetherOffloadGetAndClearStats is called after all offload rules have already been
+        // deleted for the given upstream interface. Before starting the cleanup in this
+        // function, call synchronizeKernelRCU to make sure that all currently running eBPF
+        // programs have finished on all CPUs, especially any unfinished packet processing.
+        // After synchronizeKernelRCU returns, it is safe to read from or delete entries in
+        // the stats map and the limit map.
+        final int res = synchronizeKernelRCU();
+        if (res != 0) {
+            // Log the error but don't return; do as much cleanup as possible.
+            mLog.e("synchronize_rcu() failed: " + res);
+        }
+
+        TetherStatsValue statsValue = null;
+        try {
+            statsValue = mBpfStatsMap.getValue(new TetherStatsKey(ifIndex));
+        } catch (ErrnoException e) {
+            mLog.e("Could not get stats entry for interface index " + ifIndex + ": ", e);
+            return null;
+        }
+
+        if (statsValue == null) {
+            mLog.e("Could not get stats entry for interface index " + ifIndex);
+            return null;
+        }
+
+        try {
+            mBpfStatsMap.deleteEntry(new TetherStatsKey(ifIndex));
+        } catch (ErrnoException e) {
+            mLog.e("Could not delete stats entry for interface index " + ifIndex + ": ", e);
+            return null;
+        }
+
+        try {
+            mBpfLimitMap.deleteEntry(new TetherLimitKey(ifIndex));
+        } catch (ErrnoException e) {
+            mLog.e("Could not delete limit for interface index " + ifIndex + ": ", e);
+            return null;
+        }
+
+        return statsValue;
+    }
+
+    @Override
     public String toString() {
         return "mBpfIngressMap{"
                 + (mBpfIngressMap != null ? "initialized" : "not initialized") + "}, "
@@ -187,4 +242,36 @@
                 + (mBpfLimitMap != null ? "initialized" : "not initialized") + "} "
                 + "}";
     }
+
+    /**
+     * Call synchronize_rcu() to block until all existing RCU read-side critical sections have
+     * been completed.
+     * Note that BpfCoordinatorTest has no permission to create or close a PF_KEY socket. This
+     * is okay for now because the caller #tetherOffloadGetAndClearStats does not depend on the
+     * result of this function, so the tests are not broken.
+     * TODO: Wrap this function into Dependencies for mocking in tests.
+     */
+    private int synchronizeKernelRCU() {
+        // This is a temporary hack for the network stats map swap on devices running
+        // 4.9 kernels. The kernel's socket release code for PF_KEY sockets explicitly
+        // calls synchronize_rcu(), which is exactly what we need here.
+        FileDescriptor pfSocket;
+        try {
+            pfSocket = Os.socket(AF_KEY, OsConstants.SOCK_RAW | OsConstants.SOCK_CLOEXEC,
+                    PF_KEY_V2);
+        } catch (ErrnoException e) {
+            mLog.e("create PF_KEY socket failed: ", e);
+            return e.errno;
+        }
+
+        // When closing socket, synchronize_rcu() gets called in sock_release().
+        try {
+            Os.close(pfSocket);
+        } catch (ErrnoException e) {
+            mLog.e("failed to close the PF_KEY socket: ", e);
+            return e.errno;
+        }
+
+        return 0;
+    }
 }
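
Note: the TODO in the synchronizeKernelRCU javadoc above suggests moving the PF_KEY trick
behind a Dependencies hook so tests can mock it. A minimal sketch of what such a hook could
look like, outside the scope of this change (the class name and its wiring are assumptions):

    import android.system.ErrnoException;
    import android.system.Os;
    import android.system.OsConstants;

    import java.io.FileDescriptor;

    /** Hypothetical test hook; not part of this change. */
    public class SynchronizeKernelRcuDependencies {
        // Same constants as in the api31 BpfCoordinatorShimImpl above. See
        // include/linux/socket.h and include/uapi/linux/pfkeyv2.h.
        private static final int AF_KEY = 15;
        private static final int PF_KEY_V2 = 2;

        /** Returns 0 on success, or the errno of the failing socket call. Tests can override. */
        public int synchronizeKernelRCU() {
            try {
                final FileDescriptor pfSocket = Os.socket(AF_KEY,
                        OsConstants.SOCK_RAW | OsConstants.SOCK_CLOEXEC, PF_KEY_V2);
                // Closing a PF_KEY socket makes the kernel call synchronize_rcu() in
                // sock_release(), which is the behaviour the shim relies on.
                Os.close(pfSocket);
                return 0;
            } catch (ErrnoException e) {
                return e.errno;
            }
        }
    }
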
diff --git a/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java b/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java
index de65ea5..61abfa3 100644
--- a/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java
+++ b/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java
@@ -89,5 +89,24 @@
     */
     @Nullable
     public abstract boolean tetherOffloadSetInterfaceQuota(int ifIndex, long quotaBytes);
+
+    /**
+     * Return BPF tethering offload statistics and clear the stats for a given upstream.
+     *
+     * Must only be called once all offload rules have already been deleted for the given upstream
+     * interface. The existing stats will be fetched and returned. The stats and the limit for the
+     * given upstream interface will be deleted as well.
+     *
+     * The stats and limit for a given upstream interface must be initialized (using
+     * tetherOffloadSetInterfaceQuota) before any offload will occur on that interface.
+     *
+     * Note that this can only be called after the BPF maps have been initialized.
+     *
+     * @param ifIndex Index of upstream interface.
+     * @return TetherStatsValue, which contains the given upstream interface's tethering statistics
+     *         since tethering was first started on that upstream interface.
+     */
+    @Nullable
+    public abstract TetherStatsValue tetherOffloadGetAndClearStats(int ifIndex);
 }
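
Since this file only declares the abstract API, here is a minimal caller sketch of the
contract described in the javadoc above (illustrative only; the helper name is made up,
and the real caller is BpfCoordinator#tetherOffloadRuleRemove in the next file):

    // Fetches the final stats snapshot for an upstream whose last offload rule was removed.
    // Returns null if the cleanup failed (maps not initialized, stats entry missing, or the
    // netd fallback threw). On success the stats and limit entries for ifIndex have been
    // deleted, so this is the last delta that can ever be read for that upstream.
    @Nullable
    private SparseArray<TetherStatsValue> getAndClearFinalStats(
            @NonNull BpfCoordinatorShim shim, int ifIndex) {
        final TetherStatsValue stats = shim.tetherOffloadGetAndClearStats(ifIndex);
        if (stats == null) return null;

        final SparseArray<TetherStatsValue> snapshot = new SparseArray<>();
        snapshot.put(ifIndex, stats);
        return snapshot;
    }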
 
diff --git a/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java b/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java
index d5ab550..64ac37c 100644
--- a/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java
+++ b/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java
@@ -34,15 +34,12 @@
 import android.net.NetworkStats;
 import android.net.NetworkStats.Entry;
 import android.net.TetherOffloadRuleParcel;
-import android.net.TetherStatsParcel;
 import android.net.ip.IpServer;
 import android.net.netstats.provider.NetworkStatsProvider;
 import android.net.util.SharedLog;
 import android.net.util.TetheringUtils.ForwardedStats;
 import android.os.ConditionVariable;
 import android.os.Handler;
-import android.os.RemoteException;
-import android.os.ServiceSpecificException;
 import android.system.ErrnoException;
 import android.text.TextUtils;
 import android.util.Log;
@@ -361,22 +358,19 @@
         // Do cleanup functionality if there is no more rule on the given upstream.
         final int upstreamIfindex = rule.upstreamIfindex;
         if (!isAnyRuleOnUpstream(upstreamIfindex)) {
-            try {
-                final TetherStatsParcel stats =
-                        mNetd.tetherOffloadGetAndClearStats(upstreamIfindex);
-                SparseArray<TetherStatsValue> tetherStatsList =
-                        new SparseArray<TetherStatsValue>();
-                tetherStatsList.put(stats.ifIndex, new TetherStatsValue(stats.rxPackets,
-                        stats.rxBytes, 0 /* rxErrors */, stats.txPackets, stats.txBytes,
-                        0 /* txErrors */));
-
-                // Update the last stats delta and delete the local cache for a given upstream.
-                updateQuotaAndStatsFromSnapshot(tetherStatsList);
-                mStats.remove(upstreamIfindex);
-            } catch (RemoteException | ServiceSpecificException e) {
-                Log.wtf(TAG, "Exception when cleanup tether stats for upstream index "
-                        + upstreamIfindex + ": ", e);
+            final TetherStatsValue statsValue =
+                    mBpfCoordinatorShim.tetherOffloadGetAndClearStats(upstreamIfindex);
+            if (statsValue == null) {
+                Log.wtf(TAG, "Failed to clean up tether stats for upstream index " + upstreamIfindex);
+                return;
             }
+
+            SparseArray<TetherStatsValue> tetherStatsList = new SparseArray<TetherStatsValue>();
+            tetherStatsList.put(upstreamIfindex, statsValue);
+
+            // Update the last stats delta and delete the local cache for a given upstream.
+            updateQuotaAndStatsFromSnapshot(tetherStatsList);
+            mStats.remove(upstreamIfindex);
         }
     }
 
diff --git a/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java b/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java
index 183571f..4abaf03 100644
--- a/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java
+++ b/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java
@@ -138,6 +138,11 @@
         }
 
         @Override
+        public boolean deleteEntry(Struct key) throws ErrnoException {
+            return mMap.remove(key) != null;
+        }
+
+        @Override
         public V getValue(@NonNull K key) throws ErrnoException {
             // Return value for a given key. Otherwise, return null without an error ENOENT.
             // BpfMap#getValue treats that the entry is not found as no error.
@@ -253,12 +258,16 @@
     }
 
     // Update a stats entry or create if not exists.
+    private void updateStatsEntryToStatsMap(@NonNull TetherStatsParcel stats) throws Exception {
+        final TetherStatsKey key = new TetherStatsKey(stats.ifIndex);
+        final TetherStatsValue value = new TetherStatsValue(stats.rxPackets, stats.rxBytes,
+                0L /* rxErrors */, stats.txPackets, stats.txBytes, 0L /* txErrors */);
+        mBpfStatsMap.updateEntry(key, value);
+    }
+
     private void updateStatsEntry(@NonNull TetherStatsParcel stats) throws Exception {
         if (mDeps.isAtLeastS()) {
-            final TetherStatsKey key = new TetherStatsKey(stats.ifIndex);
-            final TetherStatsValue value = new TetherStatsValue(stats.rxPackets, stats.rxBytes,
-                    0L /* rxErrors */, stats.txPackets, stats.txBytes, 0L /* txErrors */);
-            mBpfStatsMap.updateEntry(key, value);
+            updateStatsEntryToStatsMap(stats);
         } else {
             when(mNetd.tetherOffloadGetStats()).thenReturn(new TetherStatsParcel[] {stats});
         }
@@ -282,6 +291,21 @@
         waitForIdle();
     }
 
+    // In tests, the stats need to be set before deleting the last rule.
+    // This is because BpfCoordinator#tetherOffloadRuleRemove reads the stats of the interface
+    // being cleaned up after the last rule has been deleted, and the interface cleanup in
+    // #tetherOffloadRuleRemove fails if there are no stats for that interface.
+    // Note that the mocked netd tetherOffloadGetAndClearStats replaces all stats entries
+    // because it does not store the previous entries.
+    private void updateStatsEntryForTetherOffloadGetAndClearStats(TetherStatsParcel stats)
+            throws Exception {
+        if (mDeps.isAtLeastS()) {
+            updateStatsEntryToStatsMap(stats);
+        } else {
+            when(mNetd.tetherOffloadGetAndClearStats(stats.ifIndex)).thenReturn(stats);
+        }
+    }
+
     private void clearStatsInvocations() {
         if (mDeps.isAtLeastS()) {
             clearInvocations(mBpfStatsMap);
@@ -377,6 +401,17 @@
         }
     }
 
+    private void verifyTetherOffloadGetAndClearStats(@Nullable InOrder inOrder, int ifIndex)
+            throws Exception {
+        if (mDeps.isAtLeastS()) {
+            inOrder.verify(mBpfStatsMap).getValue(new TetherStatsKey(ifIndex));
+            inOrder.verify(mBpfStatsMap).deleteEntry(new TetherStatsKey(ifIndex));
+            inOrder.verify(mBpfLimitMap).deleteEntry(new TetherLimitKey(ifIndex));
+        } else {
+            inOrder.verify(mNetd).tetherOffloadGetAndClearStats(ifIndex);
+        }
+    }
+
     // S+ and R api minimum tests.
     // The following tests are used to provide minimum checking for the APIs on different flow.
     // The auto merge is not enabled on mainline prod. The code flow R may be verified at the
@@ -396,17 +431,23 @@
         final Integer mobileIfIndex = 100;
         coordinator.addUpstreamNameToLookupTable(mobileIfIndex, mobileIface);
 
+        // InOrder is required because mBpfStatsMap may be accessed by both
+        // BpfCoordinator#tetherOffloadRuleAdd and BpfCoordinator#tetherOffloadGetAndClearStats.
+        // Without InOrder, #verifyTetherOffloadGetAndClearStats cannot tell which caller invoked
+        // mBpfStatsMap#getValue and would see a wrong invocation count that includes both.
+        final InOrder inOrder = inOrder(mNetd, mBpfIngressMap, mBpfLimitMap, mBpfStatsMap);
         final Ipv6ForwardingRule rule = buildTestForwardingRule(mobileIfIndex, NEIGH_A, MAC_A);
         coordinator.tetherOffloadRuleAdd(mIpServer, rule);
-        verifyTetherOffloadRuleAdd(null, rule);
-        verifyTetherOffloadSetInterfaceQuota(null, mobileIfIndex, QUOTA_UNLIMITED,
+        verifyTetherOffloadRuleAdd(inOrder, rule);
+        verifyTetherOffloadSetInterfaceQuota(inOrder, mobileIfIndex, QUOTA_UNLIMITED,
                 true /* isInit */);
 
         // Removing the last rule on current upstream immediately sends the cleanup stuff to netd.
-        when(mNetd.tetherOffloadGetAndClearStats(mobileIfIndex))
-                .thenReturn(buildTestTetherStatsParcel(mobileIfIndex, 0, 0, 0, 0));
+        updateStatsEntryForTetherOffloadGetAndClearStats(
+                buildTestTetherStatsParcel(mobileIfIndex, 0, 0, 0, 0));
         coordinator.tetherOffloadRuleRemove(mIpServer, rule);
-        verifyTetherOffloadRuleRemove(null, rule);
+        verifyTetherOffloadRuleRemove(inOrder, rule);
+        verifyTetherOffloadGetAndClearStats(inOrder, mobileIfIndex);
     }
 
     // TODO: remove once presubmit tests on R even the code is submitted on S.
@@ -717,11 +758,11 @@
         verifyNeverTetherOffloadSetInterfaceQuota(inOrder);
 
         // Removing the last rule on current upstream immediately sends the cleanup stuff to netd.
-        when(mNetd.tetherOffloadGetAndClearStats(mobileIfIndex))
-                .thenReturn(buildTestTetherStatsParcel(mobileIfIndex, 0, 0, 0, 0));
+        updateStatsEntryForTetherOffloadGetAndClearStats(
+                buildTestTetherStatsParcel(mobileIfIndex, 0, 0, 0, 0));
         coordinator.tetherOffloadRuleRemove(mIpServer, ruleA);
         verifyTetherOffloadRuleRemove(inOrder, ruleA);
-        inOrder.verify(mNetd).tetherOffloadGetAndClearStats(mobileIfIndex);
+        verifyTetherOffloadGetAndClearStats(inOrder, mobileIfIndex);
         inOrder.verifyNoMoreInteractions();
     }
 
@@ -767,8 +808,8 @@
                 mobileIfIndex, NEIGH_A, MAC_A);
         final Ipv6ForwardingRule mobileRuleB = buildTestForwardingRule(
                 mobileIfIndex, NEIGH_B, MAC_B);
-        when(mNetd.tetherOffloadGetAndClearStats(ethIfIndex))
-                .thenReturn(buildTestTetherStatsParcel(ethIfIndex, 10, 20, 30, 40));
+        updateStatsEntryForTetherOffloadGetAndClearStats(
+                buildTestTetherStatsParcel(ethIfIndex, 10, 20, 30, 40));
 
         // Update the existing rules for upstream changes. The rules are removed and re-added one
         // by one for updating upstream interface index by #tetherOffloadRuleUpdate.
@@ -778,16 +819,16 @@
         verifyTetherOffloadSetInterfaceQuota(inOrder, mobileIfIndex, QUOTA_UNLIMITED,
                 true /* isInit */);
         verifyTetherOffloadRuleRemove(inOrder, ethernetRuleB);
-        inOrder.verify(mNetd).tetherOffloadGetAndClearStats(ethIfIndex);
+        verifyTetherOffloadGetAndClearStats(inOrder, ethIfIndex);
         verifyTetherOffloadRuleAdd(inOrder, mobileRuleB);
 
         // [3] Clear all rules for a given IpServer.
-        when(mNetd.tetherOffloadGetAndClearStats(mobileIfIndex))
-                .thenReturn(buildTestTetherStatsParcel(mobileIfIndex, 50, 60, 70, 80));
+        updateStatsEntryForTetherOffloadGetAndClearStats(
+                buildTestTetherStatsParcel(mobileIfIndex, 50, 60, 70, 80));
         coordinator.tetherOffloadRuleClear(mIpServer);
         verifyTetherOffloadRuleRemove(inOrder, mobileRuleA);
         verifyTetherOffloadRuleRemove(inOrder, mobileRuleB);
-        inOrder.verify(mNetd).tetherOffloadGetAndClearStats(mobileIfIndex);
+        verifyTetherOffloadGetAndClearStats(inOrder, mobileIfIndex);
 
         // [4] Force pushing stats update to verify that the last diff of stats is reported on all
         // upstreams.