/*
 * Copyright (C) 2020 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

#include <stddef.h>
#include <stdint.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include "page.h"

// Note: Most PR_MTE_* constants come from the upstream kernel. This tag mask
// allows the hardware to provision any nonzero tag. Zero tags are reserved
// for scudo to use for the chunk headers in order to prevent linear heap
// overflow/underflow.
#define PR_MTE_TAG_SET_NONZERO (0xfffeUL << PR_MTE_TAG_SHIFT)
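
// Illustrative sketch (not part of this header): user code would typically
// hand this mask to the kernel together with a tag-check mode via the
// upstream PR_SET_TAGGED_ADDR_CTRL prctl, e.g.:
//
//   unsigned long ctrl =
//       PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | PR_MTE_TAG_SET_NONZERO;
//   if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0) != 0) {
//     // Kernel or hardware without MTE support.
//   }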

inline bool mte_supported() {
#if defined(__aarch64__)
  static bool supported = getauxval(AT_HWCAP2) & HWCAP2_MTE;
#else
  static bool supported = false;
#endif
  return supported;
}

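// Returns `ptr` with the allocation tag of its granule loaded into the
// address bits (via LDG); returns `ptr` unchanged when MTE is unsupported.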
inline void* get_tagged_address(const void* ptr) {
#if defined(__aarch64__)
  if (mte_supported()) {
    __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]" : "+r"(ptr));
  }
#endif  // aarch64
  return const_cast<void*>(ptr);
}

// Inserts a random tag into `ptr`: each set bit among the lower 16 bits of
// `mask` excludes the corresponding tag value from being generated. Note: This
// does not tag memory. This generates a pointer to be used with set_memory_tag.
inline void* insert_random_tag(const void* ptr, __attribute__((unused)) uint64_t mask = 0) {
#if defined(__aarch64__)
  if (mte_supported() && ptr) {
    __asm__ __volatile__(".arch_extension mte; irg %0, %0, %1" : "+r"(ptr) : "r"(mask));
  }
#endif  // aarch64
  return const_cast<void*>(ptr);
}

// Stores the address tag in `ptr` to memory, as the allocation tag of the
// 16-byte granule at `ptr`.
inline void set_memory_tag(__attribute__((unused)) void* ptr) {
#if defined(__aarch64__)
  if (mte_supported()) {
    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]" : "+r"(ptr));
  }
#endif  // aarch64
}
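
// Illustrative sketch: the intended flow is to derive a tagged pointer first
// and only then tag the memory. `granule` here is a hypothetical 16-byte
// aligned address inside a PROT_MTE mapping:
//
//   void* tagged = insert_random_tag(granule);  // new pointer, memory untouched
//   set_memory_tag(tagged);                     // granule's allocation tag now matches
//   void* same = get_tagged_address(granule);   // recovers the tagged pointer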

#ifdef __aarch64__
class ScopedDisableMTE {
  size_t prev_tco_;

 public:
  ScopedDisableMTE() {
    if (mte_supported()) {
      __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1" : "=r"(prev_tco_));
    }
  }

  ~ScopedDisableMTE() {
    if (mte_supported()) {
      __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(prev_tco_));
    }
  }
};
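
// Illustrative sketch: setting TCO suppresses tag check faults, which is
// useful when deliberately touching memory whose tags may not match, e.g.:
//
//   {
//     ScopedDisableMTE disable_mte;
//     memcpy(dst, possibly_mistagged_src, n);  // no tag check faults here
//   }  // destructor restores the previous TCO state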

// N.B. this is NOT the page size; the value 4096 is hardcoded in the
// compiler's code generation. See
// https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
constexpr size_t kStackMteRingbufferSizeMultiplier = 4096;

inline size_t stack_mte_ringbuffer_size(uintptr_t size_cls) {
  return kStackMteRingbufferSizeMultiplier * (1 << size_cls);
}

inline size_t stack_mte_ringbuffer_size_from_pointer(uintptr_t ptr) {
  // The size in the top byte is not the size_cls, but the number of "pages" (not OS pages, but
  // kStackMteRingbufferSizeMultiplier).
  return kStackMteRingbufferSizeMultiplier * (ptr >> 56ULL);
}

inline uintptr_t stack_mte_ringbuffer_size_add_to_pointer(uintptr_t ptr, uintptr_t size_cls) {
  return ptr | ((1ULL << size_cls) << 56ULL);
}
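
// Illustrative arithmetic: for size_cls == 2 the buffer spans
// 4096 * (1 << 2) == 16384 bytes, i.e. 4 multiplier-sized "pages", so the
// encoded pointer carries 4 in its top byte:
//
//   uintptr_t p = stack_mte_ringbuffer_size_add_to_pointer(base, 2);
//   // (p >> 56) == 4
//   // stack_mte_ringbuffer_size_from_pointer(p) == 16384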

inline void stack_mte_free_ringbuffer(uintptr_t stack_mte_tls) {
  size_t size = stack_mte_ringbuffer_size_from_pointer(stack_mte_tls);
  void* ptr = reinterpret_cast<void*>(stack_mte_tls & ((1ULL << 56ULL) - 1ULL));
  munmap(ptr, size);
}

inline void* stack_mte_ringbuffer_allocate(size_t n, const char* name) {
  if (n > 7) return nullptr;
  // Allocation needs to be aligned to 2*size to make the fancy code-gen work.
  // So we allocate 3*size - pagesz bytes, which will always contain size bytes
  // aligned to 2*size, and unmap the unneeded part.
  // See
  // https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
  //
  // In the worst case, we get an allocation that is one page past the properly
  // aligned address, in which case we have to unmap the previous
  // 2*size - pagesz bytes. In that case, we still have size properly aligned
  // bytes left.
  size_t size = stack_mte_ringbuffer_size(n);
  size_t pgsize = page_size();

  size_t alloc_size = __BIONIC_ALIGN(3 * size - pgsize, pgsize);
  void* allocation_ptr =
      mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (allocation_ptr == MAP_FAILED)
    return nullptr;
  uintptr_t allocation = reinterpret_cast<uintptr_t>(allocation_ptr);

  size_t alignment = 2 * size;
  uintptr_t aligned_allocation = __BIONIC_ALIGN(allocation, alignment);
  if (allocation != aligned_allocation) {
    munmap(reinterpret_cast<void*>(allocation), aligned_allocation - allocation);
  }
  if (aligned_allocation + size != allocation + alloc_size) {
    munmap(reinterpret_cast<void*>(aligned_allocation + size),
           (allocation + alloc_size) - (aligned_allocation + size));
  }

  if (name) {
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<void*>(aligned_allocation), size, name);
  }

  // We store the size in the top byte of the pointer (which is ignored).
  return reinterpret_cast<void*>(stack_mte_ringbuffer_size_add_to_pointer(aligned_allocation, n));
}
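
// Illustrative sketch: allocating and releasing a ring buffer for size
// class 1 (4096 * (1 << 1) == 8192 bytes); the VMA name is arbitrary:
//
//   void* rb = stack_mte_ringbuffer_allocate(1, "stack_mte_ring:example");
//   if (rb != nullptr) {
//     // ... use the buffer; its size stays recoverable from the pointer ...
//     stack_mte_free_ringbuffer(reinterpret_cast<uintptr_t>(rb));
//   }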
#else
struct ScopedDisableMTE {
  // Silence unused variable warnings in non-aarch64 builds.
  ScopedDisableMTE() {}
};
#endif