// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
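
/*
 * accept_range describes an in-flight acceptance request. start and end are
 * bit numbers within the bitmap (unit_size granularity). While a range is on
 * accepting_list, overlapping callers spin and retry until it is removed.
 */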
struct accept_range {
        struct list_head list;
        unsigned long start;
        unsigned long end;
};

static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory that is addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long range_start, range_end;
        struct accept_range range, *entry;
        unsigned long flags;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return;
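
        /* Each bit in the bitmap represents unit_size bytes of memory */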
        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * load_unaligned_zeropad() can lead to unwanted loads across page
         * boundaries. The unwanted loads are typically harmless. But, they
         * might be made to totally unrelated or even unmapped memory.
         * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
         * #VE) to recover from these unwanted loads.
         *
         * But, this approach does not work for unaccepted memory. For TDX, a
         * load from unaccepted memory will not lead to a recoverable exception
         * within the guest. The guest will exit to the VMM where the only
         * recourse is to terminate the guest.
         *
         * There are two parts to fix this issue and comprehensively avoid
         * access to unaccepted memory. Together these ensure that an extra
         * "guard" page is accepted in addition to the memory that needs to be
         * used.
         *
         * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
         *    checks up to end+unit_size if 'end' is aligned on a unit_size
         *    boundary.
         *
         * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
         *    'end' is aligned on a unit_size boundary. (immediately following
         *    this comment)
         */
        if (!(end % unit_size))
                end += unit_size;

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

        range.start = start / unit_size;
        range.end = DIV_ROUND_UP(end, unit_size);
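
        /* range.start and range.end are bit numbers within the bitmap */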
retry:
        spin_lock_irqsave(&unaccepted_memory_lock, flags);

        /*
         * Check if anybody else is working on accepting the same range of
         * memory.
         *
         * The check is done with unit_size granularity. It is crucial to catch
         * all accept requests to the same unit_size block, even if they don't
         * overlap on the physical address level.
         */
        list_for_each_entry(entry, &accepting_list, list) {
                if (entry->end <= range.start)
                        continue;
                if (entry->start >= range.end)
                        continue;

                /*
                 * Somebody else is accepting the range, or at least part
                 * of it.
                 *
                 * Drop the lock and retry until it is complete.
                 */
                spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
                goto retry;
        }

        /*
         * Register that the range is about to be accepted.
         * Make sure nobody else will accept it.
         */
        list_add(&range.list, &accepting_list);
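
        /*
         * Walk every contiguous run of set (i.e. still unaccepted) bits in
         * the range and accept the memory it represents.
         */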
        range_start = range.start;
        for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
                                   range.end) {
                unsigned long phys_start, phys_end;
                unsigned long len = range_end - range_start;

                phys_start = range_start * unit_size + unaccepted->phys_base;
                phys_end = range_end * unit_size + unaccepted->phys_base;

                /*
                 * Keep interrupts disabled until the accept operation is
                 * complete in order to prevent deadlocks.
                 *
                 * Enabling interrupts before calling arch_accept_memory()
                 * creates an opportunity for an interrupt handler to request
                 * acceptance of the same memory. The handler will continuously
                 * spin with interrupts disabled, preventing other tasks from
                 * making progress with the acceptance process.
                 */
                spin_unlock(&unaccepted_memory_lock);

                arch_accept_memory(phys_start, phys_end);

                spin_lock(&unaccepted_memory_lock);
                bitmap_clear(unaccepted->bitmap, range_start, len);
        }

        list_del(&range.list);
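
        /*
         * Accepting memory can take a while and runs with interrupts
         * disabled; keep the soft lockup detector from triggering.
         */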
        touch_softlockup_watchdog();

        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long flags;
        bool ret = false;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return false;

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return false;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * Also consider the unaccepted state of the *next* page. See fix #1 in
         * the comment on load_unaligned_zeropad() in accept_memory().
         */
        if (!(end % unit_size))
                end += unit_size;

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

        spin_lock_irqsave(&unaccepted_memory_lock, flags);
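        /* Scan the range one unit_size block at a time */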
        while (start < end) {
                if (test_bit(start / unit_size, unaccepted->bitmap)) {
                        ret = true;
                        break;
                }

                start += unit_size;
        }
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

        return ret;
}

#ifdef CONFIG_PROC_VMCORE
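/*
 * Reading unaccepted memory is not safe (see the load_unaligned_zeropad()
 * comment in accept_memory()), so report unaccepted PFNs as not-RAM when
 * dumping /proc/vmcore.
 */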
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
                                                unsigned long pfn)
{
        return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
        .pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
        register_vmcore_cb(&vmcore_cb);
        return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */