Merge "Encryptedstore IO throughput" into main
diff --git a/tests/aidl/com/android/microdroid/testservice/IBenchmarkService.aidl b/tests/aidl/com/android/microdroid/testservice/IBenchmarkService.aidl
index 4d043f6..ca752e1 100644
--- a/tests/aidl/com/android/microdroid/testservice/IBenchmarkService.aidl
+++ b/tests/aidl/com/android/microdroid/testservice/IBenchmarkService.aidl
@@ -27,6 +27,15 @@
      */
     double measureReadRate(String filename, boolean isRand);
 
+    /**
+     * Measures the rate of writing the given file, creating it if necessary.
+     *
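+     * @param filename Path of the file to write.
+     * @param sizeBytes The number of bytes to write.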
+     * @return The write rate in MB/s.
+     */
+    double measureWriteRate(String filename, long sizeBytes);
+
     /** Returns an entry from /proc/meminfo. */
     long getMemInfoEntry(String name);
 
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index 1d827b9..915fc82 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -101,6 +101,7 @@
     private static final double NANO_TO_MICRO = 1_000.0;
     private static final String MICRODROID_IMG_PREFIX = "microdroid_";
     private static final String MICRODROID_IMG_SUFFIX = ".img";
+    static final long ENCRYPTED_STORE_SIZE = 1_073_741_824; // 1 GiB
 
     @Parameterized.Parameters(name = "protectedVm={0},os={1}")
     public static Collection<Object[]> params() {
@@ -1097,4 +1098,60 @@
                 "latency/writeRollbackProtectedSecretWithRefreshSession",
                 "us");
     }
+
+    @Test
+    public void encryptedstoreIoRate() throws Exception {
+        VirtualMachineConfig config =
+                newVmConfigBuilderWithPayloadConfig("assets/vm_config_io.json")
+                        .setDebugLevel(DEBUG_LEVEL_NONE)
+                        .setShouldUseHugepages(true)
+                        .setEncryptedStorageBytes(ENCRYPTED_STORE_SIZE)
+                        .build();
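+        // The payload from vm_config_io.json serves IBenchmarkService inside the VM.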
+        List<Double> writeThroughput = new ArrayList<>(IO_TEST_TRIAL_COUNT);
+        List<Double> readThroughput = new ArrayList<>(IO_TEST_TRIAL_COUNT);
+
+        for (int i = 0; i < IO_TEST_TRIAL_COUNT; ++i) {
+            String vmName = "vm_encryptedstore_io" + i;
+            VirtualMachine vm = forceCreateNewVirtualMachine(vmName, config);
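+            // First run: write a file to the encrypted storage, measuring write throughput.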
+            BenchmarkVmListener.create(new EncryptedstoreBenchmarkListener(writeThroughput, true))
+                    .runToFinish(TAG, vm);
+            // Re-run the same VM and measure read throughput on the file written above.
+            BenchmarkVmListener.create(new EncryptedstoreBenchmarkListener(readThroughput, false))
+                    .runToFinish(TAG, vm);
+        }
+        reportMetrics(writeThroughput, "encryptedstore/sequential_write", "mb_per_sec");
+        reportMetrics(readThroughput, "encryptedstore/sequential_read", "mb_per_sec");
+    }
+
+    private static class EncryptedstoreBenchmarkListener
+            implements BenchmarkVmListener.InnerListener {
+        private static final String FILENAME = "/mnt/encryptedstore/test_file";
+
+        private final List<Double> mIoThroughput;
+        // True if write throughput is measured; otherwise read throughput is measured.
+        private final boolean mMeasureWrite;
+
+        EncryptedstoreBenchmarkListener(List<Double> ioThroughput, boolean measureWrite) {
+            mIoThroughput = ioThroughput;
+            mMeasureWrite = measureWrite;
+        }
+
+        @Override
+        public void onPayloadReady(VirtualMachine vm, IBenchmarkService benchmarkService)
+                throws RemoteException {
+            double rate;
+            if (mMeasureWrite) {
+                // Fill 3/4 of the storage by writing random data to a file.
+                rate =
+                        benchmarkService.measureWriteRate(
+                                FILENAME, /* sizeBytes= */ (ENCRYPTED_STORE_SIZE * 3) / 4);
+            } else {
+                // Sequentially read the file that was just written.
+                rate = benchmarkService.measureReadRate(FILENAME, /* isRand= */ false);
+            }
+            mIoThroughput.add(rate);
+        }
+    }
 }
diff --git a/tests/benchmark/src/native/benchmarkbinary.cpp b/tests/benchmark/src/native/benchmarkbinary.cpp
index 5d93b93..5e46712 100644
--- a/tests/benchmark/src/native/benchmarkbinary.cpp
+++ b/tests/benchmark/src/native/benchmarkbinary.cpp
@@ -67,6 +67,15 @@
         return resultStatus(res);
     }
 
+    ndk::ScopedAStatus measureWriteRate(const std::string& filename, int64_t size_bytes,
+                                        double* out) override {
+        auto res = measure_write_rate(filename, size_bytes);
+        if (res.ok()) {
+            *out = res.value();
+        }
+        return resultStatus(res);
+    }
+
     ndk::ScopedAStatus getMemInfoEntry(const std::string& name, int64_t* out) override {
         auto value = read_meminfo_entry(name);
         if (!value.ok()) {
@@ -144,6 +153,57 @@
         return {file_size_mb / elapsed_seconds};
     }
 
+    /**
+     * Measures the throughput of writing random data to the given file.
+     * @return The write rate in MB/s.
+     */
+    Result<double> measure_write_rate(const std::string& filename, int64_t size_bytes) {
+        const int64_t block_count = size_bytes / kBlockSizeBytes;
+        char buf[kBlockSizeBytes];
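+        // Fill one block with random data up front; the same block is written repeatedly,
+        // so generating the random data is not part of the measured time.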
+        unique_fd fd_rand(open("/dev/urandom", O_RDONLY));
+        if (fd_rand.get() == -1) {
+            return ErrnoError() << "failed to open /dev/urandom";
+        }
+        if (read(fd_rand, buf, kBlockSizeBytes) == -1) {
+            return ErrnoError() << "failed to read /dev/urandom";
+        }
+
+        struct timespec start;
+        if (clock_gettime(CLOCK_MONOTONIC, &start) == -1) {
+            return ErrnoError() << "failed to clock_gettime";
+        }
+        // TODO(b/390648694): Ideally open with O_SYNC instead of syncfs().
+        unique_fd fd(open(filename.c_str(), O_CREAT | O_WRONLY, 0666));
+        if (fd.get() == -1) {
+            return ErrnoError() << "Write: opening " << filename << " failed";
+        }
+
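+        // Write the data one block at a time; the writes are buffered in the page cache.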
+        for (int64_t i = 0; i < block_count; ++i) {
+            auto bytes = write(fd, buf, kBlockSizeBytes);
+            if (bytes == 0) {
+                return Error() << "unexpected end of file";
+            } else if (bytes == -1) {
+                return ErrnoError() << "failed to write";
+            }
+        }
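+        // Flush the filesystem so that the measured time also covers writing the buffered
+        // data out to the backing storage.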
+        if (syncfs(fd) == -1) {
+            return ErrnoError() << "failed to syncfs";
+        }
+        struct timespec finish;
+        if (clock_gettime(CLOCK_MONOTONIC, &finish) == -1) {
+            return ErrnoError() << "failed to clock_gettime";
+        }
+        double elapsed_seconds =
+                finish.tv_sec - start.tv_sec + (finish.tv_nsec - start.tv_nsec) / 1e9;
+        double file_size_mb = (double)size_bytes / kNumBytesPerMB;
+        return {file_size_mb / elapsed_seconds};
+    }
+
     void* alloc_anon_memory(long mb) {
         long bytes = mb << 20;
         void* p = malloc(bytes);