// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>

#ifdef CONFIG_DYNAMIC_SCS
DEFINE_STATIC_KEY_FALSE(dynamic_scs_enabled);
#endif

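/*
 * Account or unaccount a shadow call stack in the per-node
 * NR_KERNEL_SCS_KB vmstat counter; callers pass account = 1 on
 * allocation and -1 on free.
 */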
static void __scs_account(void *s, int account)
{
	struct page *scs_page = vmalloc_to_page(s);

	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
}

/* Matches NR_CACHED_STACKS for VMAP_STACK */
#define NR_CACHED_SCS 2
static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);

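/*
 * Try to reuse a recently freed stack from the per-CPU cache before
 * falling back to a fresh vmalloc allocation. A cached stack is
 * unpoisoned and cleared before reuse, since it still holds the
 * previous owner's return addresses.
 */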
static void *__scs_alloc(int node)
{
	int i;
	void *s;

	for (i = 0; i < NR_CACHED_SCS; i++) {
		s = this_cpu_xchg(scs_cache[i], NULL);
		if (s) {
			s = kasan_unpoison_vmalloc(s, SCS_SIZE,
						   KASAN_VMALLOC_PROT_NORMAL);
			memset(s, 0, SCS_SIZE);
			goto out;
		}
	}

	s = __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
				 GFP_SCS, PAGE_KERNEL, 0, node,
				 __builtin_return_address(0));

out:
	return kasan_reset_tag(s);
}

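/*
 * Allocate a zero-initialized shadow call stack and write SCS_END_MAGIC
 * at its end, so that an overflow can be detected later via
 * task_scs_end_corrupted().
 */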
void *scs_alloc(int node)
{
	void *s;

	s = __scs_alloc(node);
	if (!s)
		return NULL;

	*__scs_magic(s) = SCS_END_MAGIC;

	/*
	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
	 */
	kasan_poison_vmalloc(s, SCS_SIZE);
	__scs_account(s, 1);
	return s;
}

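/* Return a stack to the per-CPU cache, or free it if the cache is full. */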
void scs_free(void *s)
{
	int i;

	__scs_account(s, -1);

	/*
	 * We cannot sleep as this can be called in interrupt context,
	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
	 * to free the stack.
	 */

	for (i = 0; i < NR_CACHED_SCS; i++)
		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
			return;

	kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_PROT_NORMAL);
	vfree_atomic(s);
}

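/*
 * CPU hotplug teardown callback: drain the dead CPU's stack cache.
 * Unlike scs_free(), this runs in a sleepable context, so a plain
 * vfree() is safe here.
 */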
static int scs_cleanup(unsigned int cpu)
{
	int i;
	void **cache = per_cpu_ptr(scs_cache, cpu);

	for (i = 0; i < NR_CACHED_SCS; i++) {
		vfree(cache[i]);
		cache[i] = NULL;
	}

	return 0;
}

void __init scs_init(void)
{
	if (!scs_is_enabled())
		return;
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
			  scs_cleanup);
}

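/*
 * Allocate a shadow call stack for a new task and point both the stack
 * base and the current shadow stack pointer at it.
 */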
int scs_prepare(struct task_struct *tsk, int node)
{
	void *s;

	if (!scs_is_enabled())
		return 0;

	s = scs_alloc(node);
	if (!s)
		return -ENOMEM;

	task_scs(tsk) = task_scs_sp(tsk) = s;
	return 0;
}

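/*
 * With CONFIG_DEBUG_STACK_USAGE, track the highest shadow stack usage
 * seen across all exiting tasks: the cmpxchg loop raises the global
 * high-water mark locklessly and logs each new maximum.
 */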
static void scs_check_usage(struct task_struct *tsk)
{
	static unsigned long highest;

	unsigned long *p, prev, curr = highest, used = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
		return;

	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;
		used += sizeof(*p);
	}

	while (used > curr) {
		prev = cmpxchg_relaxed(&highest, curr, used);

		if (prev == curr) {
			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
				tsk->comm, task_pid_nr(tsk), used);
			break;
		}

		curr = prev;
	}
}

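/*
 * Called when a task is freed: warn if the end marker has been
 * overwritten, which indicates the shadow stack overflowed or was
 * corrupted, then release the stack.
 */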
void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!scs_is_enabled() || !s)
		return;

	WARN(task_scs_end_corrupted(tsk),
	     "corrupted shadow stack detected when freeing task\n");
	scs_check_usage(tsk);
	scs_free(s);
}