Add separate mutex for BPF ring buffer.

The new mutex covers the ring buffer and the ConsumeAll/ConsumeAllLocked
methods. This change also removes the unnecessary GUARDED_BY annotations
from TraceIfaces (making it static) and mCallback (making it const).

This should be close to a no-op: in the few places where the new lock is
needed, it is taken either in addition to, or instead of, the existing
mMutex.
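
For illustration only, a minimal sketch of the resulting split (a
hypothetical PollerSketch class, not the actual NetworkTracePoller; only
the shape of the pattern is shown, with member names mirroring the patch):

  // Sketch: mBufferMutex guards only the ring buffer; the control state
  // (session count, poll interval, configuration map) keeps its own mMutex.
  #include <mutex>

  class PollerSketch {
   public:
    // Runs on the task runner; skip this round if the buffer is already
    // being consumed rather than blocking.
    void PollOnce() {
      if (mBufferMutex.try_lock()) {
        ConsumeAllLocked();
        mBufferMutex.unlock();
      }
    }

    // Called by the datasource; only the buffer lock is needed.
    bool ConsumeAll() {
      std::scoped_lock<std::mutex> lock(mBufferMutex);
      return ConsumeAllLocked();
    }

   private:
    bool ConsumeAllLocked() { return true; /* drain the ring buffer */ }

    std::mutex mMutex;        // session count, poll interval, config map
    std::mutex mBufferMutex;  // ring buffer only
  };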

Bug: 320700806
Test: atest libnetworkstats_test
Change-Id: I45d36eba033afae1c03eac2f8d5b2ac4c9e78716
diff --git a/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp b/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp
index 241d5fa..c8761b5 100644
--- a/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp
+++ b/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp
@@ -41,9 +41,9 @@
   // The task runner is sequential so these can't run on top of each other.
   runner->PostDelayedTask([=, this]() { PollAndSchedule(runner, poll_ms); }, poll_ms);
 
-  if (mMutex.try_lock()) {
+  if (mBufferMutex.try_lock()) {
     ConsumeAllLocked();
-    mMutex.unlock();
+    mBufferMutex.unlock();
   }
 }
 
@@ -51,6 +51,7 @@
   ALOGD("Starting datasource");
 
   std::scoped_lock<std::mutex> lock(mMutex);
+  std::scoped_lock<std::mutex> block(mBufferMutex);
   if (mSessionCount > 0) {
     if (mPollMs != pollMs) {
       // Nothing technical prevents mPollMs from changing, it's just unclear
@@ -97,6 +98,7 @@
   ALOGD("Stopping datasource");
 
   std::scoped_lock<std::mutex> lock(mMutex);
+  std::scoped_lock<std::mutex> block(mBufferMutex);
   if (mSessionCount == 0) return false;  // This should never happen
 
   // If this isn't the last session, don't clean up yet.
@@ -145,7 +147,7 @@
 }
 
 bool NetworkTracePoller::ConsumeAll() {
-  std::scoped_lock<std::mutex> lock(mMutex);
+  std::scoped_lock<std::mutex> lock(mBufferMutex);
   return ConsumeAllLocked();
 }
 
diff --git a/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h b/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h
index 092ab64..790a8b1 100644
--- a/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h
+++ b/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h
@@ -50,7 +50,7 @@
   bool Stop() EXCLUDES(mMutex);
 
   // Consumes all available events from the ringbuffer.
-  bool ConsumeAll() EXCLUDES(mMutex);
+  bool ConsumeAll() EXCLUDES(mBufferMutex);
 
  private:
   // Poll the ring buffer for new data and schedule another run of ourselves
@@ -59,15 +59,20 @@
   // and thus a deadlock while resetting the TaskRunner. The runner pointer is
   // always valid within tasks run by that runner.
   void PollAndSchedule(perfetto::base::TaskRunner* runner, uint32_t poll_ms);
-  bool ConsumeAllLocked() REQUIRES(mMutex);
+  bool ConsumeAllLocked() REQUIRES(mBufferMutex);
 
   // Record sparse iface stats via atrace. This queries the per-iface stats maps
   // for any iface present in the vector of packets. This is inexact, but should
   // have sufficient coverage given these are cumulative counters.
-  void TraceIfaces(const std::vector<PacketTrace>& packets) REQUIRES(mMutex);
+  static void TraceIfaces(const std::vector<PacketTrace>& packets);
 
   std::mutex mMutex;
 
+  // The mBufferMutex protects the ring buffer. This allows separate protected
+  // access of mTaskRunner in Stop (to terminate) and mRingBuffer in ConsumeAll.
+  // Without this separation, Stop() can deadlock.
+  std::mutex mBufferMutex;
+
   // Records the number of successfully started active sessions so that only the
   // first active session attempts setup and only the last cleans up. Note that
   // the session count will remain zero if Start fails. It is expected that Stop
@@ -78,10 +83,10 @@
   uint32_t mPollMs GUARDED_BY(mMutex);
 
   // The function to process PacketTrace, typically a Perfetto sink.
-  EventSink mCallback GUARDED_BY(mMutex);
+  const EventSink mCallback;
 
   // The BPF ring buffer handle.
-  std::unique_ptr<BpfRingbuf<PacketTrace>> mRingBuffer GUARDED_BY(mMutex);
+  std::unique_ptr<BpfRingbuf<PacketTrace>> mRingBuffer GUARDED_BY(mBufferMutex);
 
   // The packet tracing config map (really a 1-element array).
   BpfMap<uint32_t, bool> mConfigurationMap GUARDED_BY(mMutex);
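
As a standalone illustration of the annotation pattern in the mBufferMutex
comment and header change above (the macro definitions are spelled out only
so the snippet compiles on its own; real code pulls them from a
thread-annotations header, and when the standard library's std::mutex is
capability-annotated, clang's -Wthread-safety will flag violations of these
contracts):

  #include <mutex>
  #include <vector>

  // Minimal stand-ins for the thread-safety macros used in the header.
  #if defined(__clang__)
  #define GUARDED_BY(x) __attribute__((guarded_by(x)))
  #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
  #define EXCLUDES(...) __attribute__((locks_excluded(__VA_ARGS__)))
  #else
  #define GUARDED_BY(x)
  #define REQUIRES(...)
  #define EXCLUDES(...)
  #endif

  class AnnotationSketch {
   public:
    // Callers must not already hold mBufferMutex; it is taken here.
    bool ConsumeAll() EXCLUDES(mBufferMutex) {
      std::scoped_lock<std::mutex> lock(mBufferMutex);
      return ConsumeAllLocked();
    }

   private:
    // May only be called with mBufferMutex held.
    bool ConsumeAllLocked() REQUIRES(mBufferMutex) {
      return !mBuffer.empty();  // stand-in for draining the ring buffer
    }

    std::mutex mMutex;        // control state keeps its own lock
    std::mutex mBufferMutex;  // the buffer's dedicated lock
    std::vector<int> mBuffer GUARDED_BY(mBufferMutex);
  };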