riscv64: fix debuggerd_test build.

This adds the missing assembly for riscv64, even though I don't have a
working tombstoned yet to test it with. There's a distinct possibility
we'll be back to fix the test (because although "register 1" is harmless
for the other architectures, on riscv64 it's the ra register, the
default link register), but at least this lets us build the test.
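
For reference (just an illustration, not part of this change), the
riscv64 wrinkle is that the numbered register x1 is itself ra:

  // Sketch only: clobbering x1 on riscv64 tramples the return address,
  // so the compiler has to be told via the clobber list, and any unwind
  // annotations (like the .cfi_escape) have to agree with that clobber.
  // (riscv64-only; r1/x1 on arm/arm64 are ordinary scratch registers.)
  asm volatile("mv x1, %0" : : "r"(0L) : "x1" /* == ra */);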

I've also simplified all the assembly down to the simplest sequence I
know of that writes 0 to address 0 (because if there was a reason to use
so many instructions before, I want to know what it is so I can write
the missing comment!).
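
In other words (a sketch of the intent, not code from the test), each
per-architecture sequence below boils down to something like:

  // Keep a pointer of interest live in a register (as the real asm does
  // with %[base]), then do the simplest possible null write, which
  // faults with SIGSEGV. Names here are hypothetical.
  void crash_with_null_write(char* ptr) {
    asm volatile("" : "+r"(ptr));              // pin ptr in a register
    *reinterpret_cast<volatile int*>(0) = 0;   // write 0 to address 0
  }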

Test: treehugger
Change-Id: I10d117eaedf361d9759a450e0973d07c4f97090e
diff --git a/debuggerd/debuggerd_test.cpp b/debuggerd/debuggerd_test.cpp
index 517f2df..a00a202 100644
--- a/debuggerd/debuggerd_test.cpp
+++ b/debuggerd/debuggerd_test.cpp
@@ -2437,35 +2437,42 @@
 #if defined(__arm__)
     asm volatile(
         "mov r1, %[base]\n"
-        "mov r2, 0\n"
-        "str r3, [r2]\n"
+        "mov r2, #0\n"
+        "str r2, [r2]\n"
         : [base] "+r"(ptr)
         :
-        : "r1", "r2", "r3", "memory");
+        : "r1", "r2", "memory");
 #elif defined(__aarch64__)
     asm volatile(
         "mov x1, %[base]\n"
-        "mov x2, 0\n"
-        "str x3, [x2]\n"
+        "mov x2, #0\n"
+        "str xzr, [x2]\n"
         : [base] "+r"(ptr)
         :
-        : "x1", "x2", "x3", "memory");
+        : "x1", "x2", "memory");
+#elif defined(__riscv)
+    // TODO: x1 is ra (the link register) on riscv64, so this might have
+    // unintended consequences, but we'll need to change the .cfi_escape if so.
+    asm volatile(
+        "mv x1, %[base]\n"
+        "sw zero, 0(zero)\n"
+        : [base] "+r"(ptr)
+        :
+        : "x1", "memory");
 #elif defined(__i386__)
     asm volatile(
         "mov %[base], %%ecx\n"
-        "movl $0, %%edi\n"
-        "movl 0(%%edi), %%edx\n"
+        "movl $0, 0\n"
         : [base] "+r"(ptr)
         :
-        : "edi", "ecx", "edx", "memory");
+        : "ecx", "memory");
 #elif defined(__x86_64__)
     asm volatile(
         "mov %[base], %%rdx\n"
-        "movq 0, %%rdi\n"
-        "movq 0(%%rdi), %%rcx\n"
+        "movq $0, 0\n"
         : [base] "+r"(ptr)
         :
-        : "rcx", "rdx", "rdi", "memory");
+        : "rdx", "memory");
 #else
 #error "Unsupported architecture"
 #endif