/*
 * Copyright (C) 2020 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

#include <stdint.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include "page.h"

// Note: most PR_MTE_* constants come from the upstream kernel. This tag mask
// allows the hardware to provision any nonzero tag. Zero tags are reserved
// for scudo's chunk headers, so a linear heap overflow or underflow into a
// header always trips a tag check.
#define PR_MTE_TAG_SET_NONZERO (0xfffeUL << PR_MTE_TAG_SHIFT)

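// Illustrative sketch, not part of the original header: one way a caller
// might combine this mask with the upstream PR_SET_TAGGED_ADDR_CTRL controls
// to enable synchronous tag checking while excluding tag 0. Assumes
// PR_TAGGED_ADDR_ENABLE and PR_MTE_TCF_SYNC (upstream kernel constants) are
// visible via <sys/prctl.h>.
inline bool example_enable_sync_mte_with_nonzero_tags() {
  return prctl(PR_SET_TAGGED_ADDR_CTRL,
               PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | PR_MTE_TAG_SET_NONZERO,
               0, 0, 0) == 0;
}
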
inline bool mte_supported() {
#if defined(__aarch64__)
  // Cached in a function-local static: hwcaps are fixed for the lifetime of
  // the process, so the auxv lookup only needs to happen once.
  static bool supported = getauxval(AT_HWCAP2) & HWCAP2_MTE;
#else
  static bool supported = false;
#endif
  return supported;
}

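// Illustrative sketch, not part of the original header: mte_supported() only
// reports that the hardware and kernel expose MTE. Whether tag checks are
// actually enabled is a separate per-process setting, queryable via prctl.
// Assumes the upstream PR_MTE_TCF_* constants are visible via <sys/prctl.h>.
inline bool example_mte_enabled_for_process() {
  if (!mte_supported()) return false;
  int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  return ctrl >= 0 && (ctrl & PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}
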
#ifdef __aarch64__
// RAII guard that sets PSTATE.TCO (Tag Check Override) for the current scope,
// temporarily suppressing MTE tag checks on loads and stores, and restores
// the previous TCO value on destruction.
class ScopedDisableMTE {
  size_t prev_tco_;

 public:
  ScopedDisableMTE() {
    if (mte_supported()) {
      __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1" : "=r"(prev_tco_));
    }
  }

  ~ScopedDisableMTE() {
    if (mte_supported()) {
      __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(prev_tco_));
    }
  }
};

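// Illustrative usage sketch, not part of the original header: read through a
// pointer whose tag may not match its memory (e.g. when inspecting another
// allocation's bytes) without faulting under synchronous MTE.
inline unsigned char example_read_ignoring_tags(const unsigned char* p) {
  ScopedDisableMTE disable_mte;
  return *p;  // tag checks are overridden until disable_mte goes out of scope
}
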
// N.B.: this is NOT the page size. The value 4096 is hardcoded in the LLVM
// stack-tagging codegen; see
// https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
constexpr size_t kStackMteRingbufferSizeMultiplier = 4096;

// A size class size_cls corresponds to a ring buffer of 2^size_cls
// multiplier-sized units, i.e. 4096 << size_cls bytes.
inline size_t stack_mte_ringbuffer_size(uintptr_t size_cls) {
  return kStackMteRingbufferSizeMultiplier * (1 << size_cls);
}

inline size_t stack_mte_ringbuffer_size_from_pointer(uintptr_t ptr) {
  // The size in the top byte is not the size_cls but the number of "pages"
  // (not OS pages; units of kStackMteRingbufferSizeMultiplier).
  return kStackMteRingbufferSizeMultiplier * (ptr >> 56ULL);
}

// Encodes the size class into the (otherwise ignored) top byte of the
// pointer, storing 1 << size_cls as the unit count read back above.
inline uintptr_t stack_mte_ringbuffer_size_add_to_pointer(uintptr_t ptr, uintptr_t size_cls) {
  return ptr | ((1ULL << size_cls) << 56ULL);
}
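
// Illustrative worked example, not part of the original header: round-trip of
// the top-byte encoding for size_cls == 2. Encoding stores 1 << 2 == 4 in the
// top byte; decoding multiplies it back out to 4 * 4096 == 16384 bytes, which
// matches stack_mte_ringbuffer_size(2), so this always returns true.
inline bool example_top_byte_size_round_trip() {
  uintptr_t base = 0x7f0000000000ULL;  // hypothetical base with a clear top byte
  uintptr_t tagged = stack_mte_ringbuffer_size_add_to_pointer(base, 2);
  return stack_mte_ringbuffer_size_from_pointer(tagged) ==
         stack_mte_ringbuffer_size(2);
}
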
inline void* stack_mte_ringbuffer_allocate(size_t n, const char* name) {
  // The top byte stores 1 << n, so the size class must fit in a single byte.
  if (n > 7) return nullptr;
  // The allocation needs to be aligned to 2*size to make the fancy code-gen
  // work, so we allocate 3*size - pagesz bytes, which always contain size
  // bytes aligned to 2*size, and unmap the unneeded parts.
  // See
  // https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
  //
  // In the worst case, the allocation lands one page past the properly
  // aligned address, and we have to unmap the preceding 2*size - pagesz
  // bytes. Even then, size properly aligned bytes remain.
  size_t size = stack_mte_ringbuffer_size(n);
  size_t pgsize = page_size();

  size_t alloc_size = __BIONIC_ALIGN(3 * size - pgsize, pgsize);
  void* allocation_ptr =
      mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (allocation_ptr == MAP_FAILED)
    return nullptr;
  uintptr_t allocation = reinterpret_cast<uintptr_t>(allocation_ptr);

  size_t alignment = 2 * size;
  uintptr_t aligned_allocation = __BIONIC_ALIGN(allocation, alignment);
  // Trim the unaligned head, if any.
  if (allocation != aligned_allocation) {
    munmap(reinterpret_cast<void*>(allocation), aligned_allocation - allocation);
  }
  // Trim whatever remains past the size bytes we keep.
  if (aligned_allocation + size != allocation + alloc_size) {
    munmap(reinterpret_cast<void*>(aligned_allocation + size),
           (allocation + alloc_size) - (aligned_allocation + size));
  }

  if (name) {
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<void*>(aligned_allocation), size, name);
  }

  // Store the size class in the (ignored) top byte of the returned pointer.
  return reinterpret_cast<void*>(stack_mte_ringbuffer_size_add_to_pointer(aligned_allocation, n));
}
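
// Illustrative usage sketch, not part of the original header: allocate a
// size-class-1 ring buffer (2 * 4096 bytes), recover its size from the tagged
// pointer, and unmap it. The top byte must be stripped before calling munmap,
// which expects an untagged address.
inline void example_ringbuffer_lifecycle() {
  void* buf = stack_mte_ringbuffer_allocate(1, "example_ring");
  if (buf == nullptr) return;
  uintptr_t tagged = reinterpret_cast<uintptr_t>(buf);
  size_t size = stack_mte_ringbuffer_size_from_pointer(tagged);
  void* untagged = reinterpret_cast<void*>(tagged & ((1ULL << 56) - 1));
  munmap(untagged, size);
}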
#else
struct ScopedDisableMTE {
  // Silence unused variable warnings in non-aarch64 builds.
  ScopedDisableMTE() {}
};
#endif