[linux.git] / drivers / firmware / efi / unaccepted_memory.c (Linux 6.14-rc3)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

struct accept_range {
        struct list_head list;
        unsigned long start;
        unsigned long end;
};

static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory addressable by the bitmap;
 */
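/*
 * Illustrative geometry (example values, not mandated by the driver): with
 * unit_size == SZ_2M and phys_base == 4GB, bit N of the bitmap covers the
 * 2MB of physical memory starting at 4GB + N * 2MB, and a bitmap of 'size'
 * bytes can describe size * BITS_PER_BYTE * 2MB of memory. A set bit means
 * the unit is still unaccepted.
 */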
void accept_memory(phys_addr_t start, unsigned long size)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long range_start, range_end;
        struct accept_range range, *entry;
        phys_addr_t end = start + size;
        unsigned long flags;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return;

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * load_unaligned_zeropad() can lead to unwanted loads across page
         * boundaries. The unwanted loads are typically harmless, but they
         * might be made to totally unrelated or even unmapped memory.
         * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
         * #VE) to recover from these unwanted loads.
         *
         * But this approach does not work for unaccepted memory. For TDX, a
         * load from unaccepted memory will not lead to a recoverable exception
         * within the guest. The guest will exit to the VMM where the only
         * recourse is to terminate the guest.
         *
         * There are two parts to fix this issue and comprehensively avoid
         * access to unaccepted memory. Together these ensure that an extra
         * "guard" page is accepted in addition to the memory that needs to be
         * used:
         *
         * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
         *    checks up to the next unit_size if 'start+size' is aligned on a
         *    unit_size boundary.
         *
         * 2. Implicitly extend accept_memory(start, size) to the next unit_size
         *    if 'start+size' is aligned on a unit_size boundary. (immediately
         *    following this comment)
         */
        if (!(end % unit_size))
                end += unit_size;
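
        /*
         * Worked example of the guard extension above (illustrative values,
         * not mandated by the driver): with unit_size == SZ_2M, accepting
         * [0, 4MB) of bitmap-relative offsets gives end == 4MB, which is
         * unit-aligned, so end is bumped to 6MB and the following 2MB unit
         * is accepted as well. A load_unaligned_zeropad() that strays just
         * past 4MB then hits accepted memory.
         */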

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;
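
        /*
         * The clamp above follows from the bitmap geometry: a bitmap of
         * unaccepted->size bytes holds size * BITS_PER_BYTE bits, and each
         * bit covers unit_size bytes of physical memory, so offsets beyond
         * size * BITS_PER_BYTE * unit_size have no bit to describe them.
         */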

        range.start = start / unit_size;
        range.end = DIV_ROUND_UP(end, unit_size);
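
        /*
         * Example of the offset-to-bit conversion (illustrative values):
         * with unit_size == SZ_2M, start == 2MB and end == 5MB give
         * range.start == 1 and range.end == DIV_ROUND_UP(5MB, 2MB) == 3,
         * i.e. the request covers bits 1 and 2 of the bitmap.
         */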
retry:
        spin_lock_irqsave(&unaccepted_memory_lock, flags);

        /*
         * Check if anybody else is working on accepting the same range of
         * memory.
         *
         * The check is done with unit_size granularity. It is crucial to
         * catch all accept requests to the same unit_size block, even if
         * they don't overlap on the physical address level.
         */
        list_for_each_entry(entry, &accepting_list, list) {
                if (entry->end <= range.start)
                        continue;
                if (entry->start >= range.end)
                        continue;

                /*
                 * Somebody else is accepting the range, or at least part
                 * of it.
                 *
                 * Drop the lock and retry until it is complete.
                 */
                spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
                goto retry;
        }
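
        /*
         * Note that the conflict check above works at unit_size granularity.
         * For example (illustrative values), with unit_size == SZ_2M the
         * requests [0, 4KB) and [1MB, 1MB + 4KB) both map to bit 0, so the
         * second caller spins in the retry loop even though the physical
         * ranges never overlap.
         */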

        /*
         * Register that the range is about to be accepted.
         * Make sure nobody else will accept it.
         */
        list_add(&range.list, &accepting_list);

        range_start = range.start;
        for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
                                   range.end) {
                unsigned long phys_start, phys_end;
                unsigned long len = range_end - range_start;

                phys_start = range_start * unit_size + unaccepted->phys_base;
                phys_end = range_end * unit_size + unaccepted->phys_base;

                /*
                 * Keep interrupts disabled until the accept operation is
                 * complete in order to prevent deadlocks.
                 *
                 * Enabling interrupts before calling arch_accept_memory()
                 * creates an opportunity for an interrupt handler to request
                 * acceptance of the same memory. Such a handler would spin
                 * continuously with interrupts disabled, preventing the other
                 * task from making progress with the acceptance process.
                 */
                spin_unlock(&unaccepted_memory_lock);

                arch_accept_memory(phys_start, phys_end);

                spin_lock(&unaccepted_memory_lock);
                bitmap_clear(unaccepted->bitmap, range_start, len);
        }
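
        /*
         * For reference: for_each_set_bitrange_from() walks maximal runs of
         * set bits, so a bitmap with bits {1, 2, 5} set yields the ranges
         * [1, 3) and [5, 6), one arch_accept_memory() call per contiguous
         * stretch. Bits are cleared only after the call returns, so the
         * bitmap never claims memory is accepted before it actually is.
         */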

        list_del(&range.list);

        touch_softlockup_watchdog();

        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
        struct efi_unaccepted_memory *unaccepted;
        phys_addr_t end = start + size;
        unsigned long flags;
        bool ret = false;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return false;

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return false;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * Also consider the unaccepted state of the *next* page. See fix #1 in
         * the comment on load_unaligned_zeropad() in accept_memory().
         */
        if (!(end % unit_size))
                end += unit_size;

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

        spin_lock_irqsave(&unaccepted_memory_lock, flags);
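        /*
         * Scan one unit at a time; 'start' is already a byte offset from
         * phys_base, so start / unit_size is the bit index. E.g. with
         * unit_size == SZ_2M (illustrative), an offset of 4MB tests bit 2.
         */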
        while (start < end) {
                if (test_bit(start / unit_size, unaccepted->bitmap)) {
                        ret = true;
                        break;
                }

                start += unit_size;
        }
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

        return ret;
}

#ifdef CONFIG_PROC_VMCORE
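/*
 * Rationale (see the load_unaligned_zeropad() comment above): in a kdump
 * kernel, reading unaccepted memory for /proc/vmcore would trigger the same
 * fatal faults, e.g. an unrecoverable exit to the VMM on TDX. Reporting such
 * PFNs as "not RAM" makes the vmcore code skip them instead of reading them.
 */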
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
                                                unsigned long pfn)
{
        return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
        .pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
        register_vmcore_cb(&vmcore_cb);
        return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */