Merge changes Ic547e18b,I7b0f6b70

* changes:
  Enable serialization_fuzzer to be built for the host
  Move serialize/deserialize fuzzing to separate function
diff --git a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
index 2c800c6..1c6583e 100644
--- a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
+++ b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
@@ -401,7 +401,7 @@
     SectionExists("batterystats", /* bytes= */ 1000);
 }
 
-TEST_F(BugreportSectionTest, WifiSectionGenerated) {
+TEST_F(BugreportSectionTest, DISABLED_WifiSectionGenerated) {
     SectionExists("wifi", /* bytes= */ 100000);
 }
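
For context on the rename above: googletest treats any test whose name begins with DISABLED_ as disabled, so the wifi section check stops running by default. A minimal sketch of that mechanism (suite and test names here are illustrative, not from this change):

```cpp
#include <gtest/gtest.h>

// Skipped by default; gtest only runs it when invoked with
// --gtest_also_run_disabled_tests.
TEST(ExampleSuite, DISABLED_SkippedByDefault) {
    FAIL() << "only reached when disabled tests are explicitly enabled";
}

TEST(ExampleSuite, RunsNormally) {
    SUCCEED();
}
```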
 
diff --git a/libs/binder/MemoryHeapBase.cpp b/libs/binder/MemoryHeapBase.cpp
index e4ea60f..e1cbc19 100644
--- a/libs/binder/MemoryHeapBase.cpp
+++ b/libs/binder/MemoryHeapBase.cpp
@@ -49,7 +49,7 @@
     int fd = ashmem_create_region(name == nullptr ? "MemoryHeapBase" : name, size);
     ALOGE_IF(fd<0, "error creating ashmem region: %s", strerror(errno));
     if (fd >= 0) {
-        if (mapfd(fd, size) == NO_ERROR) {
+        if (mapfd(fd, true, size) == NO_ERROR) {
             if (flags & READ_ONLY) {
                 ashmem_set_prot_region(fd, PROT_READ);
             }
@@ -70,7 +70,7 @@
     if (fd >= 0) {
         const size_t pagesize = getpagesize();
         size = ((size + pagesize-1) & ~(pagesize-1));
-        if (mapfd(fd, size) == NO_ERROR) {
+        if (mapfd(fd, false, size) == NO_ERROR) {
             mDevice = device;
         }
     }
@@ -82,7 +82,7 @@
 {
     const size_t pagesize = getpagesize();
     size = ((size + pagesize-1) & ~(pagesize-1));
-    mapfd(fcntl(fd, F_DUPFD_CLOEXEC, 0), size, offset);
+    mapfd(fcntl(fd, F_DUPFD_CLOEXEC, 0), false, size, offset);
 }
 
 status_t MemoryHeapBase::init(int fd, void *base, size_t size, int flags, const char* device)
@@ -98,7 +98,7 @@
     return NO_ERROR;
 }
 
-status_t MemoryHeapBase::mapfd(int fd, size_t size, off_t offset)
+status_t MemoryHeapBase::mapfd(int fd, bool writeableByCaller, size_t size, off_t offset)
 {
     if (size == 0) {
         // try to figure out the size automatically
@@ -116,8 +116,12 @@
     }
 
     if ((mFlags & DONT_MAP_LOCALLY) == 0) {
+        int prot = PROT_READ;
+        if (writeableByCaller || (mFlags & READ_ONLY) == 0) {
+            prot |= PROT_WRITE;
+        }
         void* base = (uint8_t*)mmap(nullptr, size,
-                PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
+                prot, MAP_SHARED, fd, offset);
         if (base == MAP_FAILED) {
             ALOGE("mmap(fd=%d, size=%zu) failed (%s)",
                     fd, size, strerror(errno));
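
The core of the MemoryHeapBase.cpp change is the protection selection in mapfd(): the mapping gets PROT_WRITE only when the caller explicitly asked for it (the ashmem ctor) or when the heap is not READ_ONLY. A standalone sketch of that decision, shown in isolation rather than as the actual member function; the READ_ONLY value mirrors IMemoryHeap::READ_ONLY and is given here for illustration:

```cpp
#include <sys/mman.h>
#include <cstdint>

// Mirrors IMemoryHeap::READ_ONLY; value shown for illustration only.
constexpr uint32_t READ_ONLY = 0x00000001;

// Same truth table as the new mapfd() logic: PROT_WRITE is added when the
// caller created the region itself (writeableByCaller) or the heap is not
// marked READ_ONLY. Otherwise the local mapping is read-only.
int protFor(uint32_t flags, bool writeableByCaller) {
    int prot = PROT_READ;
    if (writeableByCaller || (flags & READ_ONLY) == 0) {
        prot |= PROT_WRITE;
    }
    return prot;
}
```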
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index a9c19b3..40dd09b 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -77,9 +77,8 @@
 // many things compile this into prebuilts on the stack
 static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);
 
-static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
-static size_t gParcelGlobalAllocSize = 0;
-static size_t gParcelGlobalAllocCount = 0;
+static std::atomic<size_t> gParcelGlobalAllocCount;
+static std::atomic<size_t> gParcelGlobalAllocSize;
 
 static size_t gMaxFds = 0;
 
@@ -275,17 +274,11 @@
 }
 
 size_t Parcel::getGlobalAllocSize() {
-    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-    size_t size = gParcelGlobalAllocSize;
-    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
-    return size;
+    return gParcelGlobalAllocSize.load();
 }
 
 size_t Parcel::getGlobalAllocCount() {
-    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-    size_t count = gParcelGlobalAllocCount;
-    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
-    return count;
+    return gParcelGlobalAllocCount.load();
 }
 
 const uint8_t* Parcel::data() const
@@ -2630,16 +2623,8 @@
         releaseObjects();
         if (mData) {
             LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
-            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-            if (mDataCapacity <= gParcelGlobalAllocSize) {
-              gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
-            } else {
-              gParcelGlobalAllocSize = 0;
-            }
-            if (gParcelGlobalAllocCount > 0) {
-              gParcelGlobalAllocCount--;
-            }
-            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
+            gParcelGlobalAllocSize -= mDataCapacity;
+            gParcelGlobalAllocCount--;
             free(mData);
         }
         if (mObjects) free(mObjects);
@@ -2685,13 +2670,15 @@
 
     if (data) {
         LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-        gParcelGlobalAllocSize += desired;
-        gParcelGlobalAllocSize -= mDataCapacity;
+        if (mDataCapacity > desired) {
+            gParcelGlobalAllocSize -= (mDataCapacity - desired);
+        } else {
+            gParcelGlobalAllocSize += (desired - mDataCapacity);
+        }
+
         if (!mData) {
             gParcelGlobalAllocCount++;
         }
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
         mData = data;
         mDataCapacity = desired;
     }
@@ -2779,10 +2766,8 @@
         mOwner = nullptr;
 
         LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
         gParcelGlobalAllocSize += desired;
         gParcelGlobalAllocCount++;
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
 
         mData = data;
         mObjects = objects;
@@ -2830,10 +2815,8 @@
             if (data) {
                 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                         desired);
-                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                 gParcelGlobalAllocSize += desired;
                 gParcelGlobalAllocSize -= mDataCapacity;
-                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                 mData = data;
                 mDataCapacity = desired;
             } else {
@@ -2865,10 +2848,8 @@
         }
 
         LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
         gParcelGlobalAllocSize += desired;
         gParcelGlobalAllocCount++;
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
 
         mData = data;
         mDataSize = mDataPos = 0;
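
The Parcel.cpp hunks above all follow one pattern: the mutex-guarded size_t globals become std::atomic<size_t>, so every adjustment is a single lock-free read-modify-write. A minimal standalone sketch of that pattern (not Parcel itself; the names are illustrative):

```cpp
#include <atomic>
#include <cstddef>

static std::atomic<size_t> gAllocSize{0};
static std::atomic<size_t> gAllocCount{0};

// Called when a buffer of `capacity` bytes is allocated for a Parcel-like object.
void noteAlloc(size_t capacity) {
    gAllocSize += capacity;   // atomic fetch_add, no lock needed
    gAllocCount++;
}

// Called when that buffer is freed.
void noteFree(size_t capacity) {
    gAllocSize -= capacity;   // atomic fetch_sub
    gAllocCount--;
}

size_t globalAllocSize()  { return gAllocSize.load(); }
size_t globalAllocCount() { return gAllocCount.load(); }
```

Note that the change also drops the old underflow guards in the free path; the accounting now relies on allocations and frees staying balanced rather than clamping at zero.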
diff --git a/libs/binder/include/binder/MemoryHeapBase.h b/libs/binder/include/binder/MemoryHeapBase.h
index 52bd5de..0ece121 100644
--- a/libs/binder/include/binder/MemoryHeapBase.h
+++ b/libs/binder/include/binder/MemoryHeapBase.h
@@ -51,6 +51,8 @@
 
     /*
      * maps memory from ashmem, with the given name for debugging
+     * If the READ_ONLY flag is set, the memory will be writeable by the calling process,
+     * but not by others. This is NOT the case with the other ctors.
      */
     explicit MemoryHeapBase(size_t size, uint32_t flags = 0, char const* name = nullptr);
 
@@ -78,7 +80,7 @@
             int flags = 0, const char* device = nullptr);
 
 private:
-    status_t mapfd(int fd, size_t size, off_t offset = 0);
+    status_t mapfd(int fd, bool writeableByCaller, size_t size, off_t offset = 0);
 
     int         mFD;
     size_t      mSize;
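
A hedged usage sketch of what the new header comment describes: with the ashmem constructor plus READ_ONLY, the creating process keeps a writable mapping while other processes that import the heap only get read access. The types and ctor signature below are the standard libbinder API; the write itself is purely illustrative:

```cpp
#include <binder/MemoryHeapBase.h>
#include <utils/StrongPointer.h>
#include <sys/mman.h>
#include <cstdint>

using android::MemoryHeapBase;
using android::sp;

void makeSharedReadOnlyRegion() {
    // ashmem-backed ctor: READ_ONLY restricts *other* processes to PROT_READ,
    // but mapfd(fd, /*writeableByCaller=*/true, ...) keeps this mapping writable.
    sp<MemoryHeapBase> heap = new MemoryHeapBase(
            4096, MemoryHeapBase::READ_ONLY, "example-ro-heap");

    void* base = heap->getBase();
    if (base != MAP_FAILED) {
        static_cast<uint8_t*>(base)[0] = 42;  // still legal in the creating process
    }
}
```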
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 5e785b6..6058430 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -425,6 +425,7 @@
 
     uint64_t newLastUpdate = lastUpdate ? *lastUpdate : 0;
     do {
+        if (key.bucket > (gNCpus - 1) / CPUS_PER_ENTRY) return {};
         if (lastUpdate) {
             auto uidUpdated = uidUpdatedSince(key.uid, *lastUpdate, &newLastUpdate);
             if (!uidUpdated.has_value()) return {};
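
The new guard in cputimeinstate.cpp bounds the per-entry bucket index derived from the CPU count. A small worked sketch of that bound; the CPUS_PER_ENTRY value is an assumption for illustration, the real constant comes from the time-in-state bpf header:

```cpp
#include <cstdint>

constexpr uint32_t CPUS_PER_ENTRY = 8;  // assumed value, for illustration only

// Same predicate as the new `if` above: a key is valid only if its bucket
// index fits the number of configured CPUs.
bool bucketIsValid(uint32_t bucket, uint32_t nCpus) {
    uint32_t maxBucket = (nCpus - 1) / CPUS_PER_ENTRY;
    return bucket <= maxBucket;
}

// Example: with 10 CPUs, maxBucket == (10 - 1) / 8 == 1, so a stale or
// corrupted map key with bucket == 2 makes the read bail out with {} instead
// of walking past the expected per-UID entries.
```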
diff --git a/libs/cputimeinstate/testtimeinstate.cpp b/libs/cputimeinstate/testtimeinstate.cpp
index ea2a200..0d5f412 100644
--- a/libs/cputimeinstate/testtimeinstate.cpp
+++ b/libs/cputimeinstate/testtimeinstate.cpp
@@ -387,6 +387,28 @@
     }
 }
 
+TEST(TimeInStateTest, AllUidConcurrentTimesFailsOnInvalidBucket) {
+    uint32_t uid = 0;
+    {
+        // Find an unused UID
+        auto map = getUidsConcurrentTimes();
+        ASSERT_TRUE(map.has_value());
+        ASSERT_FALSE(map->empty());
+        for (const auto &kv : *map) uid = std::max(uid, kv.first);
+        ++uid;
+    }
+    android::base::unique_fd fd{
+        bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
+    ASSERT_GE(fd, 0);
+    uint32_t nCpus = get_nprocs_conf();
+    uint32_t maxBucket = (nCpus - 1) / CPUS_PER_ENTRY;
+    time_key_t key = {.uid = uid, .bucket = maxBucket + 1};
+    std::vector<concurrent_val_t> vals(nCpus);
+    ASSERT_FALSE(writeToMapEntry(fd, &key, vals.data(), BPF_NOEXIST));
+    EXPECT_FALSE(getUidsConcurrentTimes().has_value());
+    ASSERT_FALSE(deleteMapEntry(fd, &key));
+}
+
 TEST(TimeInStateTest, AllUidTimesConsistent) {
     auto tisMap = getUidsCpuFreqTimes();
     ASSERT_TRUE(tisMap.has_value());
diff --git a/services/gpuservice/tests/unittests/AndroidTest.xml b/services/gpuservice/tests/unittests/AndroidTest.xml
index 66f51c7..72976a8 100644
--- a/services/gpuservice/tests/unittests/AndroidTest.xml
+++ b/services/gpuservice/tests/unittests/AndroidTest.xml
@@ -18,6 +18,7 @@
         <option name="cleanup" value="true" />
         <option name="push" value="gpuservice_unittest->/data/local/tmp/gpuservice_unittest" />
     </target_preparer>
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
     <option name="test-suite-tag" value="apct" />
     <test class="com.android.tradefed.testtype.GTest" >
         <option name="native-test-device-path" value="/data/local/tmp" />