// SPDX-License-Identifier: GPL-2.0-only
/*
 * handle transition of Linux booting another kernel
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/gfp.h>
#include <linux/io.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/set_memory.h>
#include <asm/debugreg.h>

static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}

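/*
 * Reload CS via a far jump and point the data segment registers at
 * __KERNEL_DS so that their hidden descriptor caches hold sane flat
 * segments before the GDT is zapped later in machine_kexec().
 */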
static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%ss\n"
		: : : "eax", "memory");
#undef STR
#undef __STR
}

static void machine_kexec_free_page_tables(struct kimage *image)
{
	free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
	image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
	free_page((unsigned long)image->arch.pmd0);
	image->arch.pmd0 = NULL;
	free_page((unsigned long)image->arch.pmd1);
	image->arch.pmd1 = NULL;
#endif
	free_page((unsigned long)image->arch.pte0);
	image->arch.pte0 = NULL;
	free_page((unsigned long)image->arch.pte1);
	image->arch.pte1 = NULL;
}

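/*
 * machine_kexec() below must not allocate memory, so the page tables
 * that keep the control page mapped (both at its kernel virtual address
 * and 1:1 at its physical address) are allocated ahead of time here and
 * released again in machine_kexec_cleanup().
 */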
static int machine_kexec_alloc_page_tables(struct kimage *image)
{
	image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    PGD_ALLOCATION_ORDER);
#ifdef CONFIG_X86_PAE
	image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
#endif
	image->arch.pte0 = (pte_t *)get_zeroed_page(GFP_KERNEL);
	image->arch.pte1 = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!image->arch.pgd ||
#ifdef CONFIG_X86_PAE
	    !image->arch.pmd0 || !image->arch.pmd1 ||
#endif
	    !image->arch.pte0 || !image->arch.pte1) {
		return -ENOMEM;
	}
	return 0;
}

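/*
 * Install one executable 4 KiB mapping of paddr at vaddr, filling in the
 * intermediate page-table levels on demand from the preallocated
 * pmd/pte pages.
 */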
static void machine_kexec_page_table_set_one(
	pgd_t *pgd, pmd_t *pmd, pte_t *pte,
	unsigned long vaddr, unsigned long paddr)
{
	p4d_t *p4d;
	pud_t *pud;

	pgd += pgd_index(vaddr);
#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT))
		set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
#endif
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	if (!(pmd_val(*pmd) & _PAGE_PRESENT))
		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
}

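/*
 * Map the control page twice: at its kernel virtual address and
 * identity-mapped at its physical address, so the relocation code keeps
 * executing while it switches away from the current page tables.
 */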
static void machine_kexec_prepare_page_tables(struct kimage *image)
{
	void *control_page;
	pmd_t *pmd = NULL;

	control_page = page_address(image->control_code_page);
#ifdef CONFIG_X86_PAE
	pmd = image->arch.pmd0;
#endif
	machine_kexec_page_table_set_one(
		image->arch.pgd, pmd, image->arch.pte0,
		(unsigned long)control_page, __pa(control_page));
#ifdef CONFIG_X86_PAE
	pmd = image->arch.pmd1;
#endif
	machine_kexec_page_table_set_one(
		image->arch.pgd, pmd, image->arch.pte1,
		__pa(control_page), __pa(control_page));
}

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_PAGE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * - Make the control page executable.
 * - Allocate page tables.
 * - Set up page tables.
 */
int machine_kexec_prepare(struct kimage *image)
{
	int error;

	set_memory_x((unsigned long)page_address(image->control_code_page), 1);
	error = machine_kexec_alloc_page_tables(image);
	if (error)
		return error;
	machine_kexec_prepare_page_tables(image);
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
	set_memory_nx((unsigned long)page_address(image->control_code_page), 1);
	machine_kexec_free_page_tables(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;
	asmlinkage unsigned long
		(*relocate_kernel_ptr)(unsigned long indirection_page,
				       unsigned long control_page,
				       unsigned long start_address,
				       unsigned int has_pae,
				       unsigned int preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	/* ftrace patches kernel text; keep it disabled across the relocation */
	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. The kexec/kdump
		 * paths already call restore_boot_irq_mode() in one form or
		 * another; the kexec jump path needs one too.
		 */
		clear_IO_APIC();
		restore_boot_irq_mode();
#endif
	}

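	/*
	 * The copy of relocate_kernel() placed in the control page is what
	 * actually runs; it stays reachable through the page tables built in
	 * machine_kexec_prepare() once the current ones are gone.
	 */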
	control_page = page_address(image->control_code_page);
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	relocate_kernel_ptr = control_page;
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_PGD] = __pa(image->arch.pgd);

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

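	/*
	 * Everything relocate_kernel() needs is now staged in page_list and
	 * the control page; the remaining steps tear down the current
	 * kernel's descriptor-table state before handing over control.
	 */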
	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	idt_invalidate(phys_to_virt(0));
	set_gdt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel_ptr((unsigned long)image->head,
					   (unsigned long)page_list,
					   image->start,
					   boot_cpu_has(X86_FEATURE_PAE),
					   image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}