/*
 * kexec.c - kexec system call
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/*
 * Declare these symbols weak so that if architecture provides a purgatory,
 * these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;
#ifdef CONFIG_KEXEC_FILE
static int kexec_calculate_store_digests(struct kimage *image);
#endif
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);
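/*
 * Illustrative sketch (an assumption, not part of the original source):
 * the descriptor list handed to the assembly stub is a flat sequence of
 * kimage_entry_t values, each a page-aligned physical address tagged with
 * one of the IND_* flags in its low bits.  A tiny two-page image might
 * look like:
 *
 *	entry[0] = 0x01000000 | IND_DESTINATION;  // copying starts here
 *	entry[1] = 0x3fe54000 | IND_SOURCE;       // page for 0x01000000
 *	entry[2] = 0x3fe55000 | IND_SOURCE;       // page for 0x01001000
 *	entry[3] = IND_DONE;                      // end of list
 *
 * Each IND_SOURCE entry implicitly advances the destination by PAGE_SIZE,
 * and an IND_INDIRECTION entry chains to the next page of entries when
 * one page of descriptors is not enough (see kimage_add_entry() below).
 */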
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}
static int sanity_check_segment_list(struct kimage *image)
{
	int result, i;
	unsigned long nr_segments = image->nr_segments;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return result;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return result;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return result;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return result;
	}

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		result = -EADDRNOTAVAIL;
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < crashk_res.start) ||
			    (mend > crashk_res.end))
				return result;
		}
	}

	return 0;
}
static struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

static void kimage_free_page_list(struct list_head *list);
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/* Enable the special crash kernel control page allocation policy. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}
#ifdef CONFIG_KEXEC_FILE
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
	struct fd f = fdget(fd);
	int ret;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -EBADF;

	ret = vfs_getattr(&f.file->f_path, &stat);
	if (ret)
		goto out;

	if (stat.size > INT_MAX) {
		ret = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		ret = -EINVAL;
		goto out;
	}

	*buf = vmalloc(stat.size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(*buf);
			ret = bytes;
			goto out;
		}

		if (bytes == 0)
			break;
		pos += bytes;
	}

	if (pos != stat.size) {
		ret = -EBADF;
		vfree(*buf);
		goto out;
	}

	*buf_len = pos;
	ret = 0;
out:
	fdput(f);
	return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

void __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}
/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}
/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
static void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * Above call should have called into bootloader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}
/*
 * In file mode list of segments is prepared by kernel. Copy relevant
 * data from user space, do error checking, prepare segment list
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;

	ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
				&image->kernel_buf_len);
	if (ret)
		return ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
					&image->initrd_buf_len);
		if (ret)
			goto out;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);
	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
#endif /* CONFIG_KEXEC_FILE */
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This might hit if
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that problems will not occur is trivial, and the
	 * implementation is simply to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
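/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a minimal userspace caller hands the syscall an entry point and an
 * array of struct kexec_segment, e.g.:
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_image,		// user buffer holding the image
 *		.bufsz = kernel_image_len,
 *		.mem   = (void *)0x100000,	// page-aligned destination
 *		.memsz = (kernel_image_len + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry_addr, 1, &seg, KEXEC_ARCH_DEFAULT);
 *
 * In practice the segment list is built by the kexec-tools package.
 */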
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
#ifdef CONFIG_KEXEC_FILE
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH)
		dest_image = &kexec_crash_image;

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, new kernel gets loaded in reserved region. It is
	 * same memory where old crash kernel might be loaded. Free any
	 * current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after image has been loaded
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
#endif /* CONFIG_KEXEC_FILE */
void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
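/*
 * Illustrative sketch (an assumption, not part of the original source):
 * each note emitted by append_elf_note() below follows the standard ELF
 * note layout, with the name and descriptor padded to 4-byte boundaries:
 *
 *	+----------+----------+--------+---------------+----------------+
 *	| n_namesz | n_descsz | n_type | name (padded) | desc (padded)  |
 *	+----------+----------+--------+---------------+----------------+
 *
 * For a "CORE" NT_PRSTATUS note, n_namesz is 5 ("CORE" plus the NUL)
 * and n_descsz is sizeof(struct elf_prstatus).
 */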
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
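/*
 * For example (illustrative, not part of the original source):
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, and 128M
 * when it has more than 2G.
 */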
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;

		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
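/*
 * For example (illustrative, not part of the original source),
 * "crashkernel=128M@16M" reserves 128M of RAM starting at physical
 * address 16M.
 */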
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};
/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
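/*
 * For example (illustrative, not part of the original source),
 * "crashkernel=256M,high" asks for 256M to be reserved from high
 * memory (above 4G on x86_64).
 */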
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}
static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}
static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *name,
				      const char *suffix)
{
	char	*first_colon, *first_space;
	char	*ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
						crash_base, suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
					     crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLBFS
	VMCOREINFO_SYMBOL(free_huge_page);
#endif

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
#ifdef CONFIG_KEXEC_FILE
static int __kexec_add_segment(struct kimage *image, char *buf,
			       unsigned long bufsz, unsigned long mem,
			       unsigned long memsz)
{
	struct kexec_segment *ksegment;

	ksegment = &image->segment[image->nr_segments];
	ksegment->kbuf = buf;
	ksegment->bufsz = bufsz;
	ksegment->mem = mem;
	ksegment->memsz = memsz;
	image->nr_segments++;

	return 0;
}
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * image segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	__kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
			    kbuf->memsz);

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any of existing
		 * image segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	__kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
			    kbuf->memsz);

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take to next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down with-in ram range. Otherwise bottom up
	 * allocation.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
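/*
 * Worked example (illustrative, not part of the original source): with
 * kbuf->memsz = 0x3000 (12K), kbuf->buf_align = 0x1000 and a System RAM
 * range 0x100000-0x1fffff, top-down placement starts from
 * temp_start = 0x1fffff - 0x3000 = 0x1fcfff, aligns it down to 0x1fc000
 * (candidate window 0x1fc000-0x1fefff) and, if that clashes with an
 * existing segment, steps down one page at a time until the hole is
 * free or falls below the range start.
 */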
/*
 * Helper function for placing a buffer in a kexec segment. This assumes
 * that kexec_mutex is held.
 */
int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
		     unsigned long memsz, unsigned long buf_align,
		     unsigned long buf_min, unsigned long buf_max,
		     bool top_down, unsigned long *load_addr)
{

	struct kexec_segment *ksegment;
	struct kexec_buf buf, *kbuf;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!image->file_mode)
		return -EINVAL;

	if (image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add buffer after allocating
	 * control pages. All segments need to be placed first before
	 * any control pages are allocated. As control page allocation
	 * logic goes through list of segments to make sure there are
	 * no destination overlaps.
	 */
	if (!list_empty(&image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(struct kexec_buf));
	kbuf = &buf;
	kbuf->image = image;
	kbuf->buffer = buffer;
	kbuf->bufsz = bufsz;

	kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
	kbuf->buf_align = max(buf_align, PAGE_SIZE);
	kbuf->buf_min = buf_min;
	kbuf->buf_max = buf_max;
	kbuf->top_down = top_down;

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	if (image->type == KEXEC_TYPE_CRASH)
		ret = walk_iomem_res("Crash kernel",
				     IORESOURCE_MEM | IORESOURCE_BUSY,
				     crashk_res.start, crashk_res.end, kbuf,
				     locate_mem_hole_callback);
	else
		ret = walk_system_ram_res(0, -1, kbuf,
					  locate_mem_hole_callback);
	if (ret != 1) {
		/* A suitable memory range could not be found for buffer */
		return -EADDRNOTAVAIL;
	}

	/* Found a suitable memory range */
	ksegment = &image->segment[image->nr_segments - 1];
	*load_addr = ksegment->mem;
	return 0;
}
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions)
		goto out_free_desc;

	desc->tfm   = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
						sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
						digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
out:
	return ret;
}
/* Actually load purgatory. Lot of code taken from kexec-tools */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
				  unsigned long max, int top_down)
{
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
	unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
	unsigned char *buf_addr, *src;
	int i, ret = 0, entry_sidx = -1;
	const Elf_Shdr *sechdrs_c;
	Elf_Shdr *sechdrs = NULL;
	void *purgatory_buf = NULL;

	/*
	 * sechdrs_c points to section headers in purgatory and are read
	 * only. No modifications allowed.
	 */
	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

	/*
	 * We cannot modify sechdrs_c[] and its fields. It is read only.
	 * Copy it over to a local copy where one can store some temporary
	 * data and free it at the end. We need to modify ->sh_addr and
	 * ->sh_offset fields to keep track of permanent and temporary
	 * locations of sections.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;

	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

	/*
	 * We seem to have multiple copies of sections. First copy is which
	 * is embedded in kernel in read only section. Some of these sections
	 * will be copied to a temporary buffer and relocated. And these
	 * sections will finally be copied to their final destination at
	 * segment load time.
	 *
	 * Use ->sh_offset to reflect section address in memory. It will
	 * point to original read only copy if section is not allocatable.
	 * Otherwise it will point to temporary copy which will be relocated.
	 *
	 * Use ->sh_addr to contain final address of the section where it
	 * will go during execution time.
	 */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_NOBITS)
			continue;

		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
						sechdrs[i].sh_offset;
	}

	/*
	 * Identify entry point section and make entry relative to section
	 * start.
	 */
	entry = pi->ehdr->e_entry;
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		/* Make entry section relative */
		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
		     pi->ehdr->e_entry)) {
			entry_sidx = i;
			entry -= sechdrs[i].sh_addr;
			break;
		}
	}

	/* Determine how much memory is needed to load relocatable object. */
	buf_align = 1;
	bss_align = 1;
	buf_sz = 0;
	bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (buf_align < align)
				buf_align = align;
			buf_sz = ALIGN(buf_sz, align);
			buf_sz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}

	/* Determine the bss padding required to align bss properly */
	bss_pad = 0;
	if (buf_sz & (bss_align - 1))
		bss_pad = bss_align - (buf_sz & (bss_align - 1));

	memsz = buf_sz + bss_pad + bss_sz;

	/* Allocate buffer for purgatory */
	purgatory_buf = vzalloc(buf_sz);
	if (!purgatory_buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (buf_align < bss_align)
		buf_align = bss_align;

	/* Add buffer to segment list */
	ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
				buf_align, min, max, top_down,
				&pi->purgatory_load_addr);
	if (ret)
		goto out;

	/* Load SHF_ALLOC sections */
	buf_addr = purgatory_buf;
	load_addr = curr_load_addr = pi->purgatory_load_addr;
	bss_addr = load_addr + buf_sz + bss_pad;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			curr_load_addr = ALIGN(curr_load_addr, align);
			offset = curr_load_addr - load_addr;
			/* We already modified ->sh_offset to keep src addr */
			src = (char *) sechdrs[i].sh_offset;
			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

			/* Store load address and source address of section */
			sechdrs[i].sh_addr = curr_load_addr;

			/*
			 * This section got copied to temporary buffer. Update
			 * ->sh_offset accordingly.
			 */
			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

			/* Advance to the next address */
			curr_load_addr += sechdrs[i].sh_size;
		} else {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
		}
	}

	/* Update entry point based on load address of text section */
	if (entry_sidx >= 0)
		entry += sechdrs[entry_sidx].sh_addr;

	/* Make kernel jump to purgatory after shutdown */
	image->start = entry;

	/* Used later to get/set symbol values */
	pi->sechdrs = sechdrs;

	/*
	 * Used later to identify which section is purgatory and skip it
	 * from checksumming.
	 */
	pi->purgatory_buf = purgatory_buf;
	return ret;
out:
	vfree(sechdrs);
	vfree(purgatory_buf);
	return ret;
}
static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For section of type SHT_RELA/SHT_REL,
		 * ->sh_link contains section header index of associated
		 * symbol table. And ->sh_info contains section header
		 * index of section to which relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains section header index of associated
		 * string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * Respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}
/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
	    kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
{
	char *strtab;
	Elf_Sym *syms;
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	int i, k;

	if (!pi->sechdrs || !pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where symbol will finally be loaded after
	 * kexec_load_segment()
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set value of a symbol. If "get_value" is true, symbol value is
 * returned in buf otherwise symbol value is set based on value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
					sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
#endif /* CONFIG_KEXEC_FILE */
/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}