/*
 * handle transition of Linux booting another kernel
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>
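
/*
 * Map the 2MB page containing @addr into the identity page table rooted
 * at @pgd, allocating any missing intermediate levels from the image's
 * control pages. Used for addresses such as image->start that may lie
 * outside the 0..max_pfn range covered by init_level4_page().
 */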
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                                unsigned long addr)
{
        pud_t *pud;
        pmd_t *pmd;
        struct page *page;
        int result = -ENOMEM;

        addr &= PMD_MASK;
        pgd += pgd_index(addr);
        if (!pgd_present(*pgd)) {
                page = kimage_alloc_control_pages(image, 0);
                if (!page)
                        goto out;
                pud = (pud_t *)page_address(page);
                clear_page(pud);
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud)) {
                page = kimage_alloc_control_pages(image, 0);
                if (!page)
                        goto out;
                pmd = (pmd_t *)page_address(page);
                clear_page(pmd);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
        result = 0;
out:
        return result;
}
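
/*
 * Fill one level-2 (PMD) table with 2MB identity mappings for the
 * PUD_SIZE (1GB) region starting at @addr.
 */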
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
        unsigned long end_addr;

        addr &= PAGE_MASK;
        end_addr = addr + PUD_SIZE;
        while (addr < end_addr) {
                set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                addr += PMD_SIZE;
        }
}
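
/*
 * Populate one level-3 (PUD) table with identity mappings from @addr up
 * to @last_addr, allocating a fresh level-2 table for each 1GB slot and
 * clearing any leftover entries.
 */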
static int init_level3_page(struct kimage *image, pud_t *level3p,
                        unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + PGDIR_SIZE;
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pmd_t *level2p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level2p = (pmd_t *)page_address(page);
                init_level2_page(level2p, addr);
                set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
                addr += PUD_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pud_clear(level3p++);
                addr += PUD_SIZE;
        }
out:
        return result;
}
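
/*
 * Build the top-level identity mapping from @addr to @last_addr, one
 * level-3 table per PGDIR_SIZE slot; entries past @last_addr are
 * cleared.
 */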
static int init_level4_page(struct kimage *image, pgd_t *level4p,
                        unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pud_t *level3p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level3p = (pud_t *)page_address(page);
                result = init_level3_page(image, level3p, addr, last_addr);
                if (result)
                        goto out;
                set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pgd_clear(level4p++);
                addr += PGDIR_SIZE;
        }
out:
        return result;
}
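
/*
 * Free the transition page table pages tracked in image->arch.
 * free_page() ignores a zero address, so levels that were never
 * allocated are harmless here.
 */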
static void free_transition_pgtable(struct kimage *image)
{
        free_page((unsigned long)image->arch.pud);
        free_page((unsigned long)image->arch.pmd);
        free_page((unsigned long)image->arch.pte);
}
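
/*
 * Map relocate_kernel at its current virtual address onto the copy of
 * the relocation code in the control page, so execution can continue
 * at the same virtual address once the identity-mapped table is
 * switched in. These pages are allocated with GFP_KERNEL at prepare
 * time, not at reboot time, and are remembered in image->arch so
 * free_transition_pgtable() can release them.
 */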
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr, paddr;
        int result = -ENOMEM;

        vaddr = (unsigned long)relocate_kernel;
        paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
        pgd += pgd_index(vaddr);
        if (!pgd_present(*pgd)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        goto err;
                image->arch.pud = pud;
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, vaddr);
        if (!pud_present(*pud)) {
                pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                if (!pmd)
                        goto err;
                image->arch.pmd = pmd;
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, vaddr);
        if (!pmd_present(*pmd)) {
                pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!pte)
                        goto err;
                image->arch.pte = pte;
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        }
        pte = pte_offset_kernel(pmd, vaddr);
        set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
        return 0;
err:
        free_transition_pgtable(image);
        return result;
}
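
/*
 * Build the complete page table for the relocation: an identity map of
 * all physical memory, a mapping for the page containing image->start,
 * and the transition mapping for relocate_kernel.
 */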
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
        pgd_t *level4p;
        int result;
        level4p = (pgd_t *)__va(start_pgtable);
        result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
        if (result)
                return result;
        /*
         * image->start may be outside 0 ~ max_pfn, for example when
         * jumping back to the original kernel from the kexeced kernel.
         */
        result = init_one_level2_page(image, level4p, image->start);
        if (result)
                return result;
        return init_transition_pgtable(image, level4p);
}
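
/* Load a new base address and limit into the IDT register (IDTR). */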
static void set_idt(void *newidt, u16 limit)
{
        struct desc_ptr curidt;

        /* x86-64 supports unaligned loads & stores */
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtq %0\n"
                : : "m" (curidt)
                );
}
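
/* Load a new base address and limit into the GDT register (GDTR). */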
static void set_gdt(void *newgdt, u16 limit)
{
        struct desc_ptr curgdt;

        /* x86-64 supports unaligned loads & stores */
        curgdt.size    = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtq %0\n"
                : : "m" (curgdt)
                );
}
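
/*
 * Reload all data segment registers with __KERNEL_DS so that their
 * hidden descriptor caches no longer depend on the GDT, which is about
 * to be zapped.
 */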
static void load_segments(void)
{
        __asm__ __volatile__ (
                "\tmovl %0,%%ds\n"
                "\tmovl %0,%%es\n"
                "\tmovl %0,%%ss\n"
                "\tmovl %0,%%fs\n"
                "\tmovl %0,%%gs\n"
                : : "a" (__KERNEL_DS) : "memory"
                );
}
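
/*
 * Called at image-load time, while the kernel is still fully
 * functional: build the identity-mapped page table that the
 * relocation code will run under.
 */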
int machine_kexec_prepare(struct kimage *image)
{
        unsigned long start_pgtable;
        int result;

        /* Calculate the offsets */
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

        /* Set up the identity-mapped 64-bit page table */
        result = init_pgtable(image, start_pgtable);
        if (result)
                return result;

        return 0;
}
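
/* Tear down the transition page table when the image is freed. */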
void machine_kexec_cleanup(struct kimage *image)
{
        free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
        unsigned long page_list[PAGES_NR];
        void *control_page;
        int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                save_processor_state();
#endif

        save_ftrace_enabled = __ftrace_enabled_save();

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
        hw_breakpoint_disable();

        if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
                /*
                 * We need to put APICs in legacy mode so that we can
                 * get timer interrupts in the second kernel.
                 * kexec/kdump paths already have calls to
                 * disable_IO_APIC() in one form or another; the kexec
                 * jump path needs one too.
                 */
                disable_IO_APIC();
#endif
        }
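
        /*
         * The control area is two pages: the first holds the root of
         * the identity page table (PA_TABLE_PAGE below), the second
         * receives a copy of the relocate_kernel trampoline.
         */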
        control_page = page_address(image->control_code_page) + PAGE_SIZE;
        memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
        page_list[PA_TABLE_PAGE] =
                (unsigned long)__pa(page_address(image->control_code_page));

        if (image->type == KEXEC_TYPE_DEFAULT)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
                                                << PAGE_SHIFT);

        /*
         * The segment registers are funny things: they have both a
         * visible and an invisible part. Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory. At no other time is the descriptor
         * table in memory accessed.
         *
         * I take advantage of this here by force loading the
         * segments, before I zap the gdt with an invalid value.
         */
        load_segments();
        /*
         * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

        /* now call it */
        image->start = relocate_kernel((unsigned long)image->head,
                                       (unsigned long)page_list,
                                       image->start,
                                       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                restore_processor_state();
#endif

        __ftrace_enabled_restore(save_ftrace_enabled);
}
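
/*
 * Export the symbols that crash-dump tools (e.g. makedumpfile) need to
 * interpret the vmcore: the physical load offset and the top-level page
 * table, plus the NUMA node data when configured.
 */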
void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(phys_base);
        VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
        VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}