Merge "Fix memory ordering issues; document IMemory peculiarities"
diff --git a/libs/binder/IMemory.cpp b/libs/binder/IMemory.cpp
index 55ad6cc..790fa8c 100644
--- a/libs/binder/IMemory.cpp
+++ b/libs/binder/IMemory.cpp
@@ -16,6 +16,7 @@
 
 #define LOG_TAG "IMemory"
 
+#include <atomic>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -29,7 +30,6 @@
 #include <cutils/log.h>
 #include <utils/KeyedVector.h>
 #include <utils/threads.h>
-#include <utils/Atomic.h>
 #include <binder/Parcel.h>
 #include <utils/CallStack.h>
 
@@ -56,12 +56,15 @@
     struct heap_info_t {
         sp<IMemoryHeap> heap;
         int32_t         count;
+        // Note that heap_info_t cannot be meaningfully copied.
     };
 
     void free_heap(const wp<IBinder>& binder);
 
-    Mutex mHeapCacheLock;
+    Mutex mHeapCacheLock;  // Protects entire vector below.
     KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
+    // We do not use the copy-on-write capabilities of KeyedVector.
+    // TODO: Reimplement based on a standard C++ container? (see sketch below)
 };
 
 static sp<HeapCache> gHeapCache = new HeapCache();
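
Note on the TODO above: a hedged sketch of what a standard-container
replacement could look like (hypothetical, not part of this change). It
also shows why the refcount changes below are safe: every access to
count happens with the lock held, so plain ++/-- suffice.

    #include <map>
    #include <mutex>
    #include <utility>

    // Hypothetical std::map-based HeapCache: one mutex guards the whole
    // map (no copy-on-write behind our back), and the per-entry count
    // is only touched while the lock is held.
    template <typename Key, typename HeapPtr>
    class HeapCacheSketch {
    public:
        void add(const Key& key, HeapPtr heap) {
            std::lock_guard<std::mutex> guard(mLock);
            mCache[key] = Info{std::move(heap), 1};   // first reference
        }
        HeapPtr find(const Key& key) {
            std::lock_guard<std::mutex> guard(mLock);
            auto it = mCache.find(key);
            if (it == mCache.end()) return HeapPtr();
            ++it->second.count;              // plain increment under mLock
            return it->second.heap;
        }
        void release(const Key& key) {
            std::lock_guard<std::mutex> guard(mLock);
            auto it = mCache.find(key);
            if (it != mCache.end() && --it->second.count == 0)
                mCache.erase(it);            // last user: drop the entry
        }
    private:
        struct Info { HeapPtr heap; int count = 0; };
        std::mutex mLock;                    // protects the entire map
        std::map<Key, Info> mCache;
    };
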
@@ -105,7 +108,7 @@
     void assertMapped() const;
     void assertReallyMapped() const;
 
-    mutable volatile int32_t mHeapId;
+    mutable std::atomic<int32_t> mHeapId;
     mutable void*       mBase;
     mutable size_t      mSize;
     mutable uint32_t    mFlags;
@@ -248,8 +251,9 @@
 }
 
 BpMemoryHeap::~BpMemoryHeap() {
-    if (mHeapId != -1) {
-        close(mHeapId);
+    int32_t heapId = mHeapId.load(std::memory_order_relaxed);
+    if (heapId != -1) {
+        close(heapId);
         if (mRealHeap) {
             // by construction we're the last one
             if (mBase != MAP_FAILED) {
@@ -257,7 +261,7 @@
 
                 if (VERBOSE) {
                     ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
-                            binder.get(), this, mSize, mHeapId);
+                            binder.get(), this, mSize, heapId);
                     CallStack stack(LOG_TAG);
                 }
 
@@ -273,17 +277,21 @@
 
 void BpMemoryHeap::assertMapped() const
 {
-    if (mHeapId == -1) {
+    int32_t heapId = mHeapId.load(std::memory_order_acquire);
+    if (heapId == -1) {
         sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
         sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
         heap->assertReallyMapped();
         if (heap->mBase != MAP_FAILED) {
             Mutex::Autolock _l(mLock);
-            if (mHeapId == -1) {
+            if (mHeapId.load(std::memory_order_relaxed) == -1) {
                 mBase   = heap->mBase;
                 mSize   = heap->mSize;
                 mOffset = heap->mOffset;
-                android_atomic_write( dup( heap->mHeapId ), &mHeapId );
+                int fd = dup(heap->mHeapId.load(std::memory_order_relaxed));
+                ALOGE_IF(fd==-1, "cannot dup fd=%d",
+                        heap->mHeapId.load(std::memory_order_relaxed));
+                mHeapId.store(fd, std::memory_order_release);
             }
         } else {
             // something went wrong
@@ -294,7 +302,8 @@
 
 void BpMemoryHeap::assertReallyMapped() const
 {
-    if (mHeapId == -1) {
+    int32_t heapId = mHeapId.load(std::memory_order_acquire);
+    if (heapId == -1) {
 
         // remote call without mLock held, worst case scenario, we end up
         // calling transact() from multiple threads, but that's not a problem,
@@ -313,7 +322,7 @@
                 parcel_fd, size, err, strerror(-err));
 
         Mutex::Autolock _l(mLock);
-        if (mHeapId == -1) {
+        if (mHeapId.load(std::memory_order_relaxed) == -1) {
             int fd = dup( parcel_fd );
             ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, err=%d (%s)",
                     parcel_fd, size, err, strerror(errno));
@@ -322,7 +331,6 @@
             if (!(flags & READ_ONLY)) {
                 access |= PROT_WRITE;
             }
-
             mRealHeap = true;
             mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
             if (mBase == MAP_FAILED) {
@@ -333,7 +341,7 @@
                 mSize = size;
                 mFlags = flags;
                 mOffset = offset;
-                android_atomic_write(fd, &mHeapId);
+                mHeapId.store(fd, std::memory_order_release);
             }
         }
     }
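
Note: assertMapped() and assertReallyMapped() above are double-checked
locking over the same atomic: an acquire load on the fast path, then a
re-check under mLock, where a relaxed load is enough because the mutex
itself orders the competing critical sections. Schematically, a sketch
with hypothetical names, not this file's code:

    #include <atomic>
    #include <mutex>

    struct MappedSketch {
        std::atomic<int> id{-1};
        void* base = nullptr;
        std::mutex lock;

        // set_up stands in for the dup()/mmap() work done under the lock.
        void assert_mapped(int fd, void* (*set_up)(int)) {
            if (id.load(std::memory_order_acquire) != -1)
                return;                               // fast path: done
            std::lock_guard<std::mutex> guard(lock);
            if (id.load(std::memory_order_relaxed) != -1)
                return;          // raced and lost; mutex orders this load
            base = set_up(fd);                        // payload first ...
            id.store(fd, std::memory_order_release);  // ... then publish
        }
    };
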
@@ -341,7 +349,8 @@
 
 int BpMemoryHeap::getHeapID() const {
     assertMapped();
-    return mHeapId;
+    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
+    return mHeapId.load(std::memory_order_relaxed);
 }
 
 void* BpMemoryHeap::getBase() const {
@@ -418,9 +427,10 @@
                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                 binder.get(), info.heap.get(),
                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
-                static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
+                static_cast<BpMemoryHeap*>(info.heap.get())
+                    ->mHeapId.load(std::memory_order_relaxed),
                 info.count);
-        android_atomic_inc(&info.count);
+        ++info.count;
         return info.heap;
     } else {
         heap_info_t info;
@@ -445,13 +455,13 @@
         ssize_t i = mHeapCache.indexOfKey(binder);
         if (i>=0) {
             heap_info_t& info(mHeapCache.editValueAt(i));
-            int32_t c = android_atomic_dec(&info.count);
-            if (c == 1) {
+            if (--info.count == 0) {
                 ALOGD_IF(VERBOSE,
                         "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                         binder.unsafe_get(), info.heap.get(),
                         static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
-                        static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
+                        static_cast<BpMemoryHeap*>(info.heap.get())
+                            ->mHeapId.load(std::memory_order_relaxed),
                         info.count);
                 rel = mHeapCache.valueAt(i).heap;
                 mHeapCache.removeItemsAt(i);
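
Note: the decrement above is behavior-preserving. android_atomic_dec()
returned the value *before* decrementing, so both forms fire exactly
when the last reference goes away:

    int32_t c = android_atomic_dec(&info.count);  // old: count 1 -> 0
    if (c == 1) { /* last reference */ }

    if (--info.count == 0) { /* last reference, now under the lock */ }
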
@@ -482,7 +492,7 @@
         ALOGD("hey=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
                 mHeapCache.keyAt(i).unsafe_get(),
                 info.heap.get(), info.count,
-                h->mHeapId, h->mBase, h->mSize);
+                h->mHeapId.load(std::memory_order_relaxed), h->mBase, h->mSize);
     }
 }