Make more use of benchmark::DoNotOptimize in benchmarks.

A lot of these benchmarks predate DoNotOptimize and rolled their own
volatile-sink hacks to keep the calls from being optimized away.
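
For reference, the replacement pattern looks roughly like the sketch
below (BM_example_strlen and its input are made up for illustration and
not part of this change): the result is handed to
benchmark::DoNotOptimize instead of being stored in a volatile variable.

  #include <string.h>

  #include <benchmark/benchmark.h>

  // DoNotOptimize forces the compiler to materialize the result,
  // so no volatile sink variable is needed.
  static void BM_example_strlen(benchmark::State& state) {
    const char* s = "hello, world";
    for (auto _ : state) {
      benchmark::DoNotOptimize(strlen(s));
    }
  }
  BENCHMARK(BM_example_strlen);
  BENCHMARK_MAIN();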

Bug: http://b/148307629
Test: ran benchmarks before & after and got similar results
Change-Id: If44699d261b687f6253af709edda58f4c90fb285
diff --git a/benchmarks/get_heap_size_benchmark.cpp b/benchmarks/get_heap_size_benchmark.cpp
index c3680dc..47d5b18 100644
--- a/benchmarks/get_heap_size_benchmark.cpp
+++ b/benchmarks/get_heap_size_benchmark.cpp
@@ -30,8 +30,6 @@
 #include <benchmark/benchmark.h>
 #include "util.h"
 
-static volatile size_t sink;
-
 static constexpr int NTHREADS = 5;
 
 static std::atomic<int> thread_count;
@@ -57,7 +55,7 @@
     sched_yield();
   }
   for (auto _ : state) {
-    sink = mallinfo().uordblks;
+    benchmark::DoNotOptimize(mallinfo().uordblks);
   }
   for (int i = 0; i < 5; i++) {
     int res = pthread_join(t[i], NULL);
diff --git a/benchmarks/stdio_benchmark.cpp b/benchmarks/stdio_benchmark.cpp
index 037bbd9..03f3f29 100644
--- a/benchmarks/stdio_benchmark.cpp
+++ b/benchmarks/stdio_benchmark.cpp
@@ -155,9 +155,8 @@
   while (state.KeepRunning()) {
     FILE* fp = fopen("/dev/zero", "re");
     if (no_locking) __fsetlocking(fp, FSETLOCKING_BYCALLER);
-    volatile int c __attribute__((unused));
     for (size_t i = 0; i < nbytes; ++i) {
-      c = fgetc(fp);
+      benchmark::DoNotOptimize(fgetc(fp));
     }
     fclose(fp);
   }
diff --git a/benchmarks/stdlib_benchmark.cpp b/benchmarks/stdlib_benchmark.cpp
index 45b953f..b6ea58d 100644
--- a/benchmarks/stdlib_benchmark.cpp
+++ b/benchmarks/stdlib_benchmark.cpp
@@ -189,9 +189,8 @@
         buf[l++] = i, buf[l++] = j, buf[l++] = 0x80, buf[l++] = k;
   buf[l++] = 0;
 
-  volatile size_t c __attribute__((unused)) = 0;
   for (auto _ : state) {
-    c = mbstowcs(widebuf_aligned, buf_aligned, 500000);
+    benchmark::DoNotOptimize(mbstowcs(widebuf_aligned, buf_aligned, 500000));
   }
 
   state.SetBytesProcessed(uint64_t(state.iterations()) * uint64_t(500000));
diff --git a/benchmarks/string_benchmark.cpp b/benchmarks/string_benchmark.cpp
index d176675..9be54c7 100644
--- a/benchmarks/string_benchmark.cpp
+++ b/benchmarks/string_benchmark.cpp
@@ -31,9 +31,8 @@
   char* src_aligned = GetAlignedPtrFilled(&src, src_alignment, nbytes, 'x');
   char* dst_aligned = GetAlignedPtrFilled(&dst, dst_alignment, nbytes, 'x');
 
-  volatile int c __attribute__((unused)) = 0;
   while (state.KeepRunning()) {
-    c += memcmp(dst_aligned, src_aligned, nbytes);
+    benchmark::DoNotOptimize(memcmp(dst_aligned, src_aligned, nbytes));
   }
 
   state.SetBytesProcessed(uint64_t(state.iterations()) * uint64_t(nbytes));
@@ -129,9 +128,8 @@
   char* buf_aligned = GetAlignedPtrFilled(&buf, alignment, nbytes + 1, 'x');
   buf_aligned[nbytes - 1] = '\0';
 
-  volatile int c __attribute__((unused)) = 0;
   while (state.KeepRunning()) {
-    c += strlen(buf_aligned);
+    benchmark::DoNotOptimize(strlen(buf_aligned));
   }
 
   state.SetBytesProcessed(uint64_t(state.iterations()) * uint64_t(nbytes));
@@ -239,9 +237,8 @@
   s1_aligned[nbytes - 1] = '\0';
   s2_aligned[nbytes - 1] = '\0';
 
-  volatile int c __attribute__((unused));
   while (state.KeepRunning()) {
-    c = strcmp(s1_aligned, s2_aligned);
+    benchmark::DoNotOptimize(strcmp(s1_aligned, s2_aligned));
   }
 
   state.SetBytesProcessed(uint64_t(state.iterations()) * uint64_t(nbytes));
@@ -258,9 +255,8 @@
   char* s1_aligned = GetAlignedPtrFilled(&s1, s1_alignment, nbytes, 'x');
   char* s2_aligned = GetAlignedPtrFilled(&s2, s2_alignment, nbytes, 'x');
 
-  volatile int c __attribute__((unused));
   for (auto _ : state) {
-    c = strncmp(s1_aligned, s2_aligned, nbytes);
+    benchmark::DoNotOptimize(strncmp(s1_aligned, s2_aligned, nbytes));
   }
 
   state.SetBytesProcessed(uint64_t(state.iterations()) * uint64_t(nbytes));