Merge ConsumeAllLocked into ConsumeAll

There is no longer a need for separate locked and unlocked methods now
that the buffer has its own mutex.

The most notable change is that PollAndSchedule goes from using
try_lock to relying on the scoped_lock inside ConsumeAll. The try_lock
was meant to prevent the following deadlock:
- Stop() takes mMutex and calls mTaskRunner.reset()
- mTaskRunner awaits completion of final closure (PollAndSchedule)
- PollAndSchedule is blocked trying to acquire mMutex

Now that ConsumeAll depends only on mBufferMutex, a more elegant
solution is to ensure that Stop() isn't holding mBufferMutex while
calling mTaskRunner.reset().
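
For illustration, a rough before/after sketch of the relevant part of
Stop() (unrelated details and error handling elided; see the diff
below for the actual change):

  // Before: mBufferMutex was held across mTaskRunner.reset(), so the
  // final PollAndSchedule closure had to try_lock to avoid deadlocking
  // against the stopping thread.
  std::scoped_lock<std::mutex> lock(mMutex);
  std::scoped_lock<std::mutex> block(mBufferMutex);
  ConsumeAllLocked();
  mTaskRunner.reset();   // waits for the final PollAndSchedule closure
  mRingBuffer.reset();

  // After: mBufferMutex is only taken inside ConsumeAll() and around
  // the ring buffer reset, so mTaskRunner.reset() never blocks while
  // the buffer mutex is held.
  std::scoped_lock<std::mutex> lock(mMutex);
  ConsumeAll();          // locks and unlocks mBufferMutex internally
  mTaskRunner.reset();
  {
    std::scoped_lock<std::mutex> block(mBufferMutex);
    mRingBuffer.reset();
  }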

Bug: 320700806
Test: atest libnetworkstats_test
Change-Id: Id5df18bc5809ba193bbe1aa0b97121bd5a8afdcd
diff --git a/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp b/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp
index c8761b5..de927d7 100644
--- a/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp
+++ b/service-t/native/libs/libnetworkstats/NetworkTracePoller.cpp
@@ -41,17 +41,13 @@
   // The task runner is sequential so these can't run on top of each other.
   runner->PostDelayedTask([=, this]() { PollAndSchedule(runner, poll_ms); }, poll_ms);
 
-  if (mBufferMutex.try_lock()) {
-    ConsumeAllLocked();
-    mBufferMutex.unlock();
-  }
+  ConsumeAll();
 }
 
 bool NetworkTracePoller::Start(uint32_t pollMs) {
   ALOGD("Starting datasource");
 
   std::scoped_lock<std::mutex> lock(mMutex);
-  std::scoped_lock<std::mutex> block(mBufferMutex);
   if (mSessionCount > 0) {
     if (mPollMs != pollMs) {
       // Nothing technical prevents mPollMs from changing, it's just unclear
@@ -77,7 +73,10 @@
     return false;
   }
 
-  mRingBuffer = std::move(*rb);
+  {
+    std::scoped_lock<std::mutex> block(mBufferMutex);
+    mRingBuffer = std::move(*rb);
+  }
 
   auto res = mConfigurationMap.writeValue(0, true, BPF_ANY);
   if (!res.ok()) {
@@ -98,7 +97,6 @@
   ALOGD("Stopping datasource");
 
   std::scoped_lock<std::mutex> lock(mMutex);
-  std::scoped_lock<std::mutex> block(mBufferMutex);
   if (mSessionCount == 0) return false;  // This should never happen
 
   // If this isn't the last session, don't clean up yet.
@@ -116,10 +114,14 @@
   // Drain remaining events from the ring buffer now that tracing is disabled.
   // This prevents the next trace from seeing stale events and allows writing
   // the last batch of events to Perfetto.
-  ConsumeAllLocked();
+  ConsumeAll();
 
   mTaskRunner.reset();
-  mRingBuffer.reset();
+
+  {
+    std::scoped_lock<std::mutex> block(mBufferMutex);
+    mRingBuffer.reset();
+  }
 
   return res.ok();
 }
@@ -148,10 +150,6 @@
 
 bool NetworkTracePoller::ConsumeAll() {
   std::scoped_lock<std::mutex> lock(mBufferMutex);
-  return ConsumeAllLocked();
-}
-
-bool NetworkTracePoller::ConsumeAllLocked() {
   if (mRingBuffer == nullptr) {
     ALOGW("Tracing is not active");
     return false;
diff --git a/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h b/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h
index 790a8b1..72fa66e 100644
--- a/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h
+++ b/service-t/native/libs/libnetworkstats/include/netdbpf/NetworkTracePoller.h
@@ -59,7 +59,6 @@
   // and thus a deadlock while resetting the TaskRunner. The runner pointer is
   // always valid within tasks run by that runner.
   void PollAndSchedule(perfetto::base::TaskRunner* runner, uint32_t poll_ms);
-  bool ConsumeAllLocked() REQUIRES(mBufferMutex);
 
   // Record sparse iface stats via atrace. This queries the per-iface stats maps
   // for any iface present in the vector of packets. This is inexact, but should