2 * kexec.c - kexec system call
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
9 #include <linux/capability.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <linux/utsname.h>
25 #include <linux/numa.h>
26 #include <linux/suspend.h>
27 #include <linux/device.h>
28 #include <linux/freezer.h>
30 #include <linux/cpu.h>
31 #include <linux/console.h>
32 #include <linux/vmalloc.h>
33 #include <linux/swap.h>
34 #include <linux/syscore_ops.h>
37 #include <asm/uaccess.h>
39 #include <asm/sections.h>
41 /* Per cpu memory for storing cpu states in case of system crash. */
42 note_buf_t __percpu *crash_notes;
44 /* vmcoreinfo stuff */
45 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
46 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47 size_t vmcoreinfo_size;
48 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
50 /* Location of the reserved area for the crash kernel */
51 struct resource crashk_res = {
52 .name = "Crash kernel",
55 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
57 struct resource crashk_low_res = {
58 .name = "Crash kernel",
61 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
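/*
 * Decide whether a crashing task should trigger the loaded crash
 * kernel: oopses in interrupt context, in the idle task (pid 0), in
 * global init, or with panic_on_oops set all escalate to a crash dump.
 */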
64 int kexec_should_crash(struct task_struct *p)
66 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
72 * When kexec transitions to the new kernel there is a one-to-one
73 * mapping between physical and virtual addresses. On processors
74 * where you can disable the MMU this is trivial and easy. For
75 * others it is still a simple, predictable page table to set up.
77 * In that environment kexec copies the new kernel to its final
78 * resting place. This means I can only support memory whose
79 * physical address can fit in an unsigned long. In particular
80 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
81 * If the assembly stub has more restrictive requirements
82 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
83 * defined more restrictively in <asm/kexec.h>.
85 * The code for the transition from the current kernel to
86 * the new kernel is placed in the control_code_buffer, whose size
87 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
88 * page of memory is necessary, but some architectures require more.
89 * Because this memory must be identity mapped in the transition from
90 * virtual to physical addresses it must live in the range
91 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
94 * The assembly stub in the control code buffer is passed a linked list
95 * of descriptor pages detailing the source pages of the new kernel,
96 * and the destination addresses of those source pages. As this data
97 * structure is not used in the context of the current OS, it must
100 * The code has been made to work with highmem pages and will use a
101 * destination page in its final resting place (if it happens
102 * to allocate it). The end product of this is that most of the
103 * physical address space, and most of RAM can be used.
105 * Future directions include:
106 * - allocating a page table with the control code buffer identity
107 * mapped, to simplify machine_kexec and make kexec_on_panic more
112 * KIMAGE_NO_DEST is an impossible destination address, used for
113 * allocating pages whose destination address we do not care about.
115 #define KIMAGE_NO_DEST (-1UL)
117 static int kimage_is_destination_range(struct kimage *image,
118 unsigned long start, unsigned long end);
119 static struct page *kimage_alloc_page(struct kimage *image,
123 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
124 unsigned long nr_segments,
125 struct kexec_segment __user *segments)
127 size_t segment_bytes;
128 struct kimage *image;
132 /* Allocate a controlling structure */
134 image = kzalloc(sizeof(*image), GFP_KERNEL);
139 image->entry = &image->head;
140 image->last_entry = &image->head;
141 image->control_page = ~0; /* By default this does not apply */
142 image->start = entry;
143 image->type = KEXEC_TYPE_DEFAULT;
145 /* Initialize the list of control pages */
146 INIT_LIST_HEAD(&image->control_pages);
148 /* Initialize the list of destination pages */
149 INIT_LIST_HEAD(&image->dest_pages);
151 /* Initialize the list of unusable pages */
152 INIT_LIST_HEAD(&image->unuseable_pages);
154 /* Read in the segments */
155 image->nr_segments = nr_segments;
156 segment_bytes = nr_segments * sizeof(*segments);
157 result = copy_from_user(image->segment, segments, segment_bytes);
164 * Verify we have good destination addresses. The caller is
165 * responsible for making certain we don't attempt to load
166 * the new image into invalid or reserved areas of RAM. This
167 * just verifies it is an address we can use.
169 * Since the kernel does everything in page size chunks ensure
170 * the destination addresses are page aligned. Too many
171 * special cases crop up when we don't do this. The most
172 * insidious is getting overlapping destination addresses
173 * simply because addresses are changed to page size
176 result = -EADDRNOTAVAIL;
177 for (i = 0; i < nr_segments; i++) {
178 unsigned long mstart, mend;
180 mstart = image->segment[i].mem;
181 mend = mstart + image->segment[i].memsz;
182 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
184 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
188 /* Verify our destination addresses do not overlap.
189 * If we allowed overlapping destination addresses
190 * through, very weird things can happen with no
191 * easy explanation as one segment stops on another.
194 for (i = 0; i < nr_segments; i++) {
195 unsigned long mstart, mend;
198 mstart = image->segment[i].mem;
199 mend = mstart + image->segment[i].memsz;
200 for (j = 0; j < i; j++) {
201 unsigned long pstart, pend;
202 pstart = image->segment[j].mem;
203 pend = pstart + image->segment[j].memsz;
204 /* Do the segments overlap ? */
205 if ((mend > pstart) && (mstart < pend))
210 /* Ensure our buffer sizes are no larger than
211 * our memory sizes. This should always be the case,
212 * and it is easier to check up front than to be surprised
216 for (i = 0; i < nr_segments; i++) {
217 if (image->segment[i].bufsz > image->segment[i].memsz)
232 static void kimage_free_page_list(struct list_head *list);
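/*
 * Set up a kimage for a normal kexec load: allocate the control
 * structure via do_kimage_alloc(), then reserve the control code
 * buffer and a swap page for the final copy pass.
 */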
234 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
235 unsigned long nr_segments,
236 struct kexec_segment __user *segments)
239 struct kimage *image;
241 /* Allocate and initialize a controlling structure */
243 result = do_kimage_alloc(&image, entry, nr_segments, segments);
248 * Find a location for the control code buffer, and add it
249 * to the vector of segments so that its pages will also be
250 * counted as destination pages.
253 image->control_code_page = kimage_alloc_control_pages(image,
254 get_order(KEXEC_CONTROL_PAGE_SIZE));
255 if (!image->control_code_page) {
256 printk(KERN_ERR "Could not allocate control_code_buffer\n");
260 image->swap_page = kimage_alloc_control_pages(image, 0);
261 if (!image->swap_page) {
262 printk(KERN_ERR "Could not allocate swap buffer\n");
270 kimage_free_page_list(&image->control_pages);
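/*
 * Set up a kimage for a crash (panic) kernel. Unlike the normal path,
 * the entry point and every segment must lie inside the reserved
 * crashk_res region, because the image is copied into that reserved
 * memory at load time.
 */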
276 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
277 unsigned long nr_segments,
278 struct kexec_segment __user *segments)
281 struct kimage *image;
285 /* Verify we have a valid entry point */
286 if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
287 result = -EADDRNOTAVAIL;
291 /* Allocate and initialize a controlling structure */
292 result = do_kimage_alloc(&image, entry, nr_segments, segments);
296 /* Enable the special crash kernel control page allocation policy. */
299 image->control_page = crashk_res.start;
300 image->type = KEXEC_TYPE_CRASH;
303 * Verify we have good destination addresses. Normally
304 * the caller is responsible for making certain we don't
305 * attempt to load the new image into invalid or reserved
306 * areas of RAM. But crash kernels are preloaded into a
307 * reserved area of RAM. We must ensure the addresses
308 * are in the reserved area, otherwise preloading the
309 * kernel could corrupt things.
311 result = -EADDRNOTAVAIL;
312 for (i = 0; i < nr_segments; i++) {
313 unsigned long mstart, mend;
315 mstart = image->segment[i].mem;
316 mend = mstart + image->segment[i].memsz - 1;
317 /* Ensure we are within the crash kernel limits */
318 if ((mstart < crashk_res.start) || (mend > crashk_res.end))
323 * Find a location for the control code buffer, and add it
324 * to the vector of segments so that its pages will also be
325 * counted as destination pages.
328 image->control_code_page = kimage_alloc_control_pages(image,
329 get_order(KEXEC_CONTROL_PAGE_SIZE));
330 if (!image->control_code_page) {
331 printk(KERN_ERR "Could not allocate control_code_buffer\n");
344 static int kimage_is_destination_range(struct kimage *image,
350 for (i = 0; i < image->nr_segments; i++) {
351 unsigned long mstart, mend;
353 mstart = image->segment[i].mem;
354 mend = mstart + image->segment[i].memsz;
355 if ((end > mstart) && (start < mend))
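/*
 * Allocate 2^order contiguous pages for kexec's own use: clear their
 * ->mapping, stash the order in page_private() and mark each page
 * reserved so the rest of the kernel leaves them alone.
 */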
362 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
366 pages = alloc_pages(gfp_mask, order);
368 unsigned int count, i;
369 pages->mapping = NULL;
370 set_page_private(pages, order);
372 for (i = 0; i < count; i++)
373 SetPageReserved(pages + i);
379 static void kimage_free_pages(struct page *page)
381 unsigned int order, count, i;
383 order = page_private(page);
385 for (i = 0; i < count; i++)
386 ClearPageReserved(page + i);
387 __free_pages(page, order);
390 static void kimage_free_page_list(struct list_head *list)
392 struct list_head *pos, *next;
394 list_for_each_safe(pos, next, list) {
397 page = list_entry(pos, struct page, lru);
398 list_del(&page->lru);
399 kimage_free_pages(page);
403 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
406 /* Control pages are special, they are the intermediaries
407 * that are needed while we copy the rest of the pages
408 * to their final resting place. As such they must
409 * not conflict with either the destination addresses
410 * or memory the kernel is already using.
412 * The only case where we really need more than one of
413 * these is for architectures where we cannot disable
414 * the MMU and must instead generate an identity mapped
415 * page table for all of the memory.
417 * At worst this runs in O(N) of the image size.
419 struct list_head extra_pages;
424 INIT_LIST_HEAD(&extra_pages);
426 /* Loop while I can allocate a page and the page allocated
427 * is a destination page.
430 unsigned long pfn, epfn, addr, eaddr;
432 pages = kimage_alloc_pages(GFP_KERNEL, order);
435 pfn = page_to_pfn(pages);
437 addr = pfn << PAGE_SHIFT;
438 eaddr = epfn << PAGE_SHIFT;
439 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
440 kimage_is_destination_range(image, addr, eaddr)) {
441 list_add(&pages->lru, &extra_pages);
447 /* Remember the allocated page... */
448 list_add(&pages->lru, &image->control_pages);
450 /* Because the page is already in its destination
451 * location we will never allocate another page at
452 * that address. Therefore kimage_alloc_pages
453 * will not return it (again) and we don't need
454 * to give it an entry in image->segment[].
457 /* Deal with the destination pages I have inadvertently allocated.
459 * Ideally I would convert multi-page allocations into single
460 * page allocations, and add everything to image->dest_pages.
462 * For now it is simpler to just free the pages.
464 kimage_free_page_list(&extra_pages);
469 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
472 /* Control pages are special, they are the intermediaries
473 * that are needed while we copy the rest of the pages
474 * to their final resting place. As such they must
475 * not conflict with either the destination addresses
476 * or memory the kernel is already using.
478 * Control pages are also the only pages we must allocate
479 * when loading a crash kernel. All of the other pages
480 * are specified by the segments and we just memcpy
481 * into them directly.
483 * The only case where we really need more than one of
484 * these is for architectures where we cannot disable
485 * the MMU and must instead generate an identity mapped
486 * page table for all of the memory.
488 * Given the low demand this implements a very simple
489 * allocator that finds the first hole of the appropriate
490 * size in the reserved memory region, and allocates all
491 * of the memory up to and including the hole.
493 unsigned long hole_start, hole_end, size;
497 size = (1 << order) << PAGE_SHIFT;
498 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
499 hole_end = hole_start + size - 1;
500 while (hole_end <= crashk_res.end) {
503 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
505 /* See if I overlap any of the segments */
506 for (i = 0; i < image->nr_segments; i++) {
507 unsigned long mstart, mend;
509 mstart = image->segment[i].mem;
510 mend = mstart + image->segment[i].memsz - 1;
511 if ((hole_end >= mstart) && (hole_start <= mend)) {
512 /* Advance the hole to the end of the segment */
513 hole_start = (mend + (size - 1)) & ~(size - 1);
514 hole_end = hole_start + size - 1;
518 /* If I don't overlap any segments I have found my hole! */
519 if (i == image->nr_segments) {
520 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
525 image->control_page = hole_end;
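/*
 * Allocate control pages for an image, dispatching on its type:
 * normal images draw from the page allocator, crash images carve a
 * hole out of the reserved crash kernel region.
 */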
531 struct page *kimage_alloc_control_pages(struct kimage *image,
534 struct page *pages = NULL;
536 switch (image->type) {
537 case KEXEC_TYPE_DEFAULT:
538 pages = kimage_alloc_normal_control_pages(image, order);
540 case KEXEC_TYPE_CRASH:
541 pages = kimage_alloc_crash_control_pages(image, order);
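/*
 * Append one entry to the image's entry list. When the current
 * indirection page is full, allocate a fresh page, link it in with an
 * IND_INDIRECTION entry and continue writing there.
 */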
548 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
550 if (*image->entry != 0)
553 if (image->entry == image->last_entry) {
554 kimage_entry_t *ind_page;
557 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
561 ind_page = page_address(page);
562 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
563 image->entry = ind_page;
564 image->last_entry = ind_page +
565 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
567 *image->entry = entry;
574 static int kimage_set_destination(struct kimage *image,
575 unsigned long destination)
579 destination &= PAGE_MASK;
580 result = kimage_add_entry(image, destination | IND_DESTINATION);
582 image->destination = destination;
588 static int kimage_add_page(struct kimage *image, unsigned long page)
593 result = kimage_add_entry(image, page | IND_SOURCE);
595 image->destination += PAGE_SIZE;
601 static void kimage_free_extra_pages(struct kimage *image)
603 /* Walk through and free any extra destination pages I may have */
604 kimage_free_page_list(&image->dest_pages);
606 /* Walk through and free any unusable pages I have cached */
607 kimage_free_page_list(&image->unuseable_pages);
610 static void kimage_terminate(struct kimage *image)
612 if (*image->entry != 0)
615 *image->entry = IND_DONE;
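/*
 * Iterate over the entry list, following IND_INDIRECTION links from
 * one indirection page to the next and stopping at IND_DONE.
 */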
618 #define for_each_kimage_entry(image, ptr, entry) \
619 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
620 ptr = (entry & IND_INDIRECTION)? \
621 phys_to_virt((entry & PAGE_MASK)): ptr +1)
623 static void kimage_free_entry(kimage_entry_t entry)
627 page = pfn_to_page(entry >> PAGE_SHIFT);
628 kimage_free_pages(page);
631 static void kimage_free(struct kimage *image)
633 kimage_entry_t *ptr, entry;
634 kimage_entry_t ind = 0;
639 kimage_free_extra_pages(image);
640 for_each_kimage_entry(image, ptr, entry) {
641 if (entry & IND_INDIRECTION) {
642 /* Free the previous indirection page */
643 if (ind & IND_INDIRECTION)
644 kimage_free_entry(ind);
645 /* Save this indirection page until we are
650 else if (entry & IND_SOURCE)
651 kimage_free_entry(entry);
653 /* Free the final indirection page */
654 if (ind & IND_INDIRECTION)
655 kimage_free_entry(ind);
657 /* Handle any machine specific cleanup */
658 machine_kexec_cleanup(image);
660 /* Free the kexec control pages... */
661 kimage_free_page_list(&image->control_pages);
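/*
 * Walk the entry list looking for the source entry whose destination
 * is @page; used to detect when a freshly allocated page is already
 * spoken for as someone else's destination.
 */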
665 static kimage_entry_t *kimage_dst_used(struct kimage *image,
668 kimage_entry_t *ptr, entry;
669 unsigned long destination = 0;
671 for_each_kimage_entry(image, ptr, entry) {
672 if (entry & IND_DESTINATION)
673 destination = entry & PAGE_MASK;
674 else if (entry & IND_SOURCE) {
675 if (page == destination)
677 destination += PAGE_SIZE;
684 static struct page *kimage_alloc_page(struct kimage *image,
686 unsigned long destination)
689 * Here we implement safeguards to ensure that a source page
690 * is not copied to its destination page before the data on
691 * the destination page is no longer useful.
693 * To do this we maintain the invariant that a source page is
694 * either its own destination page, or it is not a
695 * destination page at all.
697 * That is slightly stronger than required, but the proof
698 * that no problems will occur is trivial, and the
699 * implementation is simple to verify.
701 * When allocating all pages normally this algorithm will run
702 * in O(N) time, but in the worst case it will run in O(N^2)
703 * time. If the runtime is a problem the data structures can
710 * Walk through the list of destination pages, and see if I
713 list_for_each_entry(page, &image->dest_pages, lru) {
714 addr = page_to_pfn(page) << PAGE_SHIFT;
715 if (addr == destination) {
716 list_del(&page->lru);
724 /* Allocate a page, if we run out of memory give up */
725 page = kimage_alloc_pages(gfp_mask, 0);
728 /* If the page cannot be used file it away */
729 if (page_to_pfn(page) >
730 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
731 list_add(&page->lru, &image->unuseable_pages);
734 addr = page_to_pfn(page) << PAGE_SHIFT;
736 /* If it is the destination page we want, use it */
737 if (addr == destination)
740 /* If the page is not a destination page use it */
741 if (!kimage_is_destination_range(image, addr,
746 * I know that the page is someone's destination page.
747 * See if there is already a source page for this
748 * destination page. And if so swap the source pages.
750 old = kimage_dst_used(image, addr);
753 unsigned long old_addr;
754 struct page *old_page;
756 old_addr = *old & PAGE_MASK;
757 old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
758 copy_highpage(page, old_page);
759 *old = addr | (*old & ~PAGE_MASK);
761 /* The old page I have found cannot be a
762 * destination page, so return it if its
763 * gfp_flags honor the ones passed in.
765 if (!(gfp_mask & __GFP_HIGHMEM) &&
766 PageHighMem(old_page)) {
767 kimage_free_pages(old_page);
775 /* Place the page on the destination list I
778 list_add(&page->lru, &image->dest_pages);
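/*
 * Load one segment of a normal image: allocate a suitable page for
 * each destination address, record it in the entry list, and copy the
 * user buffer into it a page at a time, zeroing any tail beyond bufsz.
 */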
785 static int kimage_load_normal_segment(struct kimage *image,
786 struct kexec_segment *segment)
789 unsigned long ubytes, mbytes;
791 unsigned char __user *buf;
795 ubytes = segment->bufsz;
796 mbytes = segment->memsz;
797 maddr = segment->mem;
799 result = kimage_set_destination(image, maddr);
806 size_t uchunk, mchunk;
808 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
813 result = kimage_add_page(image, page_to_pfn(page)
819 /* Start with a clear page */
821 ptr += maddr & ~PAGE_MASK;
822 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
830 result = copy_from_user(ptr, buf, uchunk);
845 static int kimage_load_crash_segment(struct kimage *image,
846 struct kexec_segment *segment)
848 /* For crash dump kernels we simply copy the data from
849 * user space to its destination.
850 * We do things a page at a time for the sake of kmap.
853 unsigned long ubytes, mbytes;
855 unsigned char __user *buf;
859 ubytes = segment->bufsz;
860 mbytes = segment->memsz;
861 maddr = segment->mem;
865 size_t uchunk, mchunk;
867 page = pfn_to_page(maddr >> PAGE_SHIFT);
873 ptr += maddr & ~PAGE_MASK;
874 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
879 if (uchunk > ubytes) {
881 /* Zero the trailing part of the page */
882 memset(ptr + uchunk, 0, mchunk - uchunk);
884 result = copy_from_user(ptr, buf, uchunk);
885 kexec_flush_icache_page(page);
900 static int kimage_load_segment(struct kimage *image,
901 struct kexec_segment *segment)
903 int result = -ENOMEM;
905 switch (image->type) {
906 case KEXEC_TYPE_DEFAULT:
907 result = kimage_load_normal_segment(image, segment);
909 case KEXEC_TYPE_CRASH:
910 result = kimage_load_crash_segment(image, segment);
918 * Exec Kernel system call: for obvious reasons only root may call it.
920 * This call breaks up into three pieces.
921 * - A generic part which loads the new kernel from the current
922 * address space, and very carefully places the data in the
925 * - A generic part that interacts with the kernel and tells all of
926 * the devices to shut down, preventing ongoing DMA and placing
927 * the devices in a consistent state so a later kernel can
930 * - A machine specific part that includes the syscall number
931 * and then copies the image to its final destination, and
932 * jumps into the image at entry.
934 * kexec does not sync, or unmount filesystems so if you need
935 * that to happen you need to do that yourself.
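 *
 * A minimal, illustrative sketch (not part of the original comment) of
 * how a user-space loader such as kexec-tools might drive this
 * syscall, assuming the image bytes sit in kernel_buf/kernel_len and
 * are to be placed at physical address 0x100000 (page_size is the
 * user-space page size):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,
 *		.bufsz = kernel_len,
 *		.mem   = (void *)0x100000,
 *		.memsz = (kernel_len + page_size - 1) & ~(page_size - 1),
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);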
937 struct kimage *kexec_image;
938 struct kimage *kexec_crash_image;
940 static DEFINE_MUTEX(kexec_mutex);
942 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
943 struct kexec_segment __user *, segments, unsigned long, flags)
945 struct kimage **dest_image, *image;
948 /* We only trust the superuser with rebooting the system. */
949 if (!capable(CAP_SYS_BOOT))
953 * Verify we have a legal set of flags
954 * This leaves us room for future extensions.
956 if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
959 /* Verify we are on the appropriate architecture */
960 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
961 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
964 /* Put an artificial cap on the number
965 * of segments passed to kexec_load.
967 if (nr_segments > KEXEC_SEGMENT_MAX)
973 /* Because we write directly to the reserved memory
974 * region when loading crash kernels we need a mutex here to
975 * prevent multiple crash kernels from attempting to load
976 * simultaneously, and to prevent a crash kernel from loading
977 * over the top of an in-use crash kernel.
979 * KISS: always take the mutex.
981 if (!mutex_trylock(&kexec_mutex))
984 dest_image = &kexec_image;
985 if (flags & KEXEC_ON_CRASH)
986 dest_image = &kexec_crash_image;
987 if (nr_segments > 0) {
990 /* Loading another kernel to reboot into */
991 if ((flags & KEXEC_ON_CRASH) == 0)
992 result = kimage_normal_alloc(&image, entry,
993 nr_segments, segments);
994 /* Loading another kernel to switch to if this one crashes */
995 else if (flags & KEXEC_ON_CRASH) {
996 /* Free any current crash dump kernel before
999 kimage_free(xchg(&kexec_crash_image, NULL));
1000 result = kimage_crash_alloc(&image, entry,
1001 nr_segments, segments);
1002 crash_map_reserved_pages();
1007 if (flags & KEXEC_PRESERVE_CONTEXT)
1008 image->preserve_context = 1;
1009 result = machine_kexec_prepare(image);
1013 for (i = 0; i < nr_segments; i++) {
1014 result = kimage_load_segment(image, &image->segment[i]);
1018 kimage_terminate(image);
1019 if (flags & KEXEC_ON_CRASH)
1020 crash_unmap_reserved_pages();
1022 /* Install the new kernel and uninstall the old */
1023 image = xchg(dest_image, image);
1026 mutex_unlock(&kexec_mutex);
1033 * Add and remove page tables for crashkernel memory
1035 * Provide an empty default implementation here -- architecture
1036 * code may override this
1038 void __weak crash_map_reserved_pages(void)
1041 void __weak crash_unmap_reserved_pages(void)
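/*
 * 32-bit compat entry point: convert each compat_kexec_segment to the
 * native layout in a user-space staging buffer, then hand off to the
 * native sys_kexec_load().
 */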
1044 #ifdef CONFIG_COMPAT
1045 asmlinkage long compat_sys_kexec_load(unsigned long entry,
1046 unsigned long nr_segments,
1047 struct compat_kexec_segment __user *segments,
1048 unsigned long flags)
1050 struct compat_kexec_segment in;
1051 struct kexec_segment out, __user *ksegments;
1052 unsigned long i, result;
1054 /* Don't allow clients that don't understand the native
1055 * architecture to do anything.
1057 if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1060 if (nr_segments > KEXEC_SEGMENT_MAX)
1063 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1064 for (i = 0; i < nr_segments; i++) {
1065 result = copy_from_user(&in, &segments[i], sizeof(in));
1069 out.buf = compat_ptr(in.buf);
1070 out.bufsz = in.bufsz;
1072 out.memsz = in.memsz;
1074 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1079 return sys_kexec_load(entry, nr_segments, ksegments, flags);
1083 void crash_kexec(struct pt_regs *regs)
1085 /* Take the kexec_mutex here to prevent sys_kexec_load
1086 * running on one cpu from replacing the crash kernel
1087 * we are using after a panic on a different cpu.
1089 * If the crash kernel was not located in a fixed area
1090 * of memory the xchg(&kexec_crash_image) would be
1091 * sufficient. But since I reuse the memory...
1093 if (mutex_trylock(&kexec_mutex)) {
1094 if (kexec_crash_image) {
1095 struct pt_regs fixed_regs;
1097 crash_setup_regs(&fixed_regs, regs);
1098 crash_save_vmcoreinfo();
1099 machine_crash_shutdown(&fixed_regs);
1100 machine_kexec(kexec_crash_image);
1102 mutex_unlock(&kexec_mutex);
1106 size_t crash_get_memory_size(void)
1109 mutex_lock(&kexec_mutex);
1110 if (crashk_res.end != crashk_res.start)
1111 size = resource_size(&crashk_res);
1112 mutex_unlock(&kexec_mutex);
1116 void __weak crash_free_reserved_phys_range(unsigned long begin,
1121 for (addr = begin; addr < end; addr += PAGE_SIZE)
1122 free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
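/*
 * Shrink the crash kernel reservation to new_size: free the tail of
 * the region back to the page allocator and re-register it with the
 * resource tree as ordinary "System RAM".
 */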
1125 int crash_shrink_memory(unsigned long new_size)
1128 unsigned long start, end;
1129 unsigned long old_size;
1130 struct resource *ram_res;
1132 mutex_lock(&kexec_mutex);
1134 if (kexec_crash_image) {
1138 start = crashk_res.start;
1139 end = crashk_res.end;
1140 old_size = (end == 0) ? 0 : end - start + 1;
1141 if (new_size >= old_size) {
1142 ret = (new_size == old_size) ? 0 : -EINVAL;
1146 ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1152 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1153 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1155 crash_map_reserved_pages();
1156 crash_free_reserved_phys_range(end, crashk_res.end);
1158 if ((start == end) && (crashk_res.parent != NULL))
1159 release_resource(&crashk_res);
1161 ram_res->start = end;
1162 ram_res->end = crashk_res.end;
1163 ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1164 ram_res->name = "System RAM";
1166 crashk_res.end = end - 1;
1168 insert_resource(&iomem_resource, ram_res);
1169 crash_unmap_reserved_pages();
1172 mutex_unlock(&kexec_mutex);
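/*
 * Append one ELF note (header, name, descriptor) to buf, keeping each
 * piece padded to a 4-byte boundary, and return the advanced pointer.
 */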
1176 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1179 struct elf_note note;
1181 note.n_namesz = strlen(name) + 1;
1182 note.n_descsz = data_len;
1184 memcpy(buf, &note, sizeof(note));
1185 buf += (sizeof(note) + 3)/4;
1186 memcpy(buf, name, note.n_namesz);
1187 buf += (note.n_namesz + 3)/4;
1188 memcpy(buf, data, note.n_descsz);
1189 buf += (note.n_descsz + 3)/4;
1194 static void final_note(u32 *buf)
1196 struct elf_note note;
1201 memcpy(buf, &note, sizeof(note));
1204 void crash_save_cpu(struct pt_regs *regs, int cpu)
1206 struct elf_prstatus prstatus;
1209 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1212 /* Using ELF notes here is opportunistic.
1213 * I need a well defined structure format
1214 * for the data I pass, and I need tags
1215 * on the data to indicate what information I have
1216 * squirrelled away. ELF notes happen to provide
1217 * all of that, so there is no need to invent something new.
1219 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1222 memset(&prstatus, 0, sizeof(prstatus));
1223 prstatus.pr_pid = current->pid;
1224 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1225 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1226 &prstatus, sizeof(prstatus));
1230 static int __init crash_notes_memory_init(void)
1232 /* Allocate memory for saving cpu registers. */
1233 crash_notes = alloc_percpu(note_buf_t);
1235 printk("Kexec: Memory allocation for saving cpu register"
1236 " states failed\n");
1241 module_init(crash_notes_memory_init)
1245 * parsing the "crashkernel" commandline
1247 * this code is intended to be called from architecture specific code
1252 * This function parses command lines in the format
1254 * crashkernel=ramsize-range:size[,...][@offset]
1256 * The function returns 0 on success and -EINVAL on failure.
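 *
 * Illustrative example (not from the original source): on a machine
 * with 1G of RAM, crashkernel=512M-2G:64M,2G-:128M reserves 64M,
 * because the system RAM size falls within the 512M-2G range.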
1258 static int __init parse_crashkernel_mem(char *cmdline,
1259 unsigned long long system_ram,
1260 unsigned long long *crash_size,
1261 unsigned long long *crash_base)
1263 char *cur = cmdline, *tmp;
1265 /* for each entry of the comma-separated list */
1267 unsigned long long start, end = ULLONG_MAX, size;
1269 /* get the start of the range */
1270 start = memparse(cur, &tmp);
1272 pr_warning("crashkernel: Memory value expected\n");
1277 pr_warning("crashkernel: '-' expected\n");
1282 /* if no ':' is here, then we read the end */
1284 end = memparse(cur, &tmp);
1286 pr_warning("crashkernel: Memory "
1287 "value expected\n");
1292 pr_warning("crashkernel: end <= start\n");
1298 pr_warning("crashkernel: ':' expected\n");
1303 size = memparse(cur, &tmp);
1305 pr_warning("Memory value expected\n");
1309 if (size >= system_ram) {
1310 pr_warning("crashkernel: invalid size\n");
1315 if (system_ram >= start && system_ram < end) {
1319 } while (*cur++ == ',');
1321 if (*crash_size > 0) {
1322 while (*cur && *cur != ' ' && *cur != '@')
1326 *crash_base = memparse(cur, &tmp);
1328 pr_warning("Memory value expected "
1339 * This function parses "simple" (old) crashkernel command lines like
1341 * crashkernel=size[@offset]
1343 * It returns 0 on success and -EINVAL on failure.
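 *
 * Illustrative example: crashkernel=128M@16M requests a 128M
 * reservation placed at physical offset 16M.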
1345 static int __init parse_crashkernel_simple(char *cmdline,
1346 unsigned long long *crash_size,
1347 unsigned long long *crash_base)
1349 char *cur = cmdline;
1351 *crash_size = memparse(cmdline, &cur);
1352 if (cmdline == cur) {
1353 pr_warning("crashkernel: memory value expected\n");
1358 *crash_base = memparse(cur+1, &cur);
1359 else if (*cur != ' ' && *cur != '\0') {
1360 pr_warning("crashkernel: unrecognized char\n");
1367 #define SUFFIX_HIGH 0
1368 #define SUFFIX_LOW 1
1369 #define SUFFIX_NULL 2
1370 static __initdata char *suffix_tbl[] = {
1371 [SUFFIX_HIGH] = ",high",
1372 [SUFFIX_LOW] = ",low",
1373 [SUFFIX_NULL] = NULL,
1377 * This function parses "suffix" crashkernel command lines like
1379 * crashkernel=size,[high|low]
1381 * It returns 0 on success and -EINVAL on failure.
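 *
 * Illustrative examples: crashkernel=256M,high and crashkernel=72M,low
 * select the ",high" and ",low" placement variants respectively.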
1383 static int __init parse_crashkernel_suffix(char *cmdline,
1384 unsigned long long *crash_size,
1385 unsigned long long *crash_base,
1388 char *cur = cmdline;
1390 *crash_size = memparse(cmdline, &cur);
1391 if (cmdline == cur) {
1392 pr_warn("crashkernel: memory value expected\n");
1396 /* check with suffix */
1397 if (strncmp(cur, suffix, strlen(suffix))) {
1398 pr_warn("crashkernel: unrecognized char\n");
1401 cur += strlen(suffix);
1402 if (*cur != ' ' && *cur != '\0') {
1403 pr_warn("crashkernel: unrecognized char\n");
1410 static __init char *get_last_crashkernel(char *cmdline,
1414 char *p = cmdline, *ck_cmdline = NULL;
1416 /* find crashkernel and use the last one if there are more */
1417 p = strstr(p, name);
1419 char *end_p = strchr(p, ' ');
1423 end_p = p + strlen(p);
1428 /* skip the one with any known suffix */
1429 for (i = 0; suffix_tbl[i]; i++) {
1430 q = end_p - strlen(suffix_tbl[i]);
1431 if (!strncmp(q, suffix_tbl[i],
1432 strlen(suffix_tbl[i])))
1437 q = end_p - strlen(suffix);
1438 if (!strncmp(q, suffix, strlen(suffix)))
1442 p = strstr(p+1, name);
1451 static int __init __parse_crashkernel(char *cmdline,
1452 unsigned long long system_ram,
1453 unsigned long long *crash_size,
1454 unsigned long long *crash_base,
1458 char *first_colon, *first_space;
1461 BUG_ON(!crash_size || !crash_base);
1465 ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1470 ck_cmdline += strlen(name);
1473 return parse_crashkernel_suffix(ck_cmdline, crash_size,
1474 crash_base, suffix);
1476 * if the commandline contains a ':', then that's the extended
1477 * syntax -- if not, it must be the classic syntax
1479 first_colon = strchr(ck_cmdline, ':');
1480 first_space = strchr(ck_cmdline, ' ');
1481 if (first_colon && (!first_space || first_colon < first_space))
1482 return parse_crashkernel_mem(ck_cmdline, system_ram,
1483 crash_size, crash_base);
1485 return parse_crashkernel_simple(ck_cmdline, crash_size,
1492 * This function is the entry point for command line parsing and should be
1493 * called from the arch-specific code.
1495 int __init parse_crashkernel(char *cmdline,
1496 unsigned long long system_ram,
1497 unsigned long long *crash_size,
1498 unsigned long long *crash_base)
1500 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1501 "crashkernel=", NULL);
1504 int __init parse_crashkernel_high(char *cmdline,
1505 unsigned long long system_ram,
1506 unsigned long long *crash_size,
1507 unsigned long long *crash_base)
1509 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1510 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1513 int __init parse_crashkernel_low(char *cmdline,
1514 unsigned long long system_ram,
1515 unsigned long long *crash_size,
1516 unsigned long long *crash_base)
1518 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1519 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
1522 static void update_vmcoreinfo_note(void)
1524 u32 *buf = vmcoreinfo_note;
1526 if (!vmcoreinfo_size)
1528 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1533 void crash_save_vmcoreinfo(void)
1535 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1536 update_vmcoreinfo_note();
1539 void vmcoreinfo_append_str(const char *fmt, ...)
1545 va_start(args, fmt);
1546 r = vsnprintf(buf, sizeof(buf), fmt, args);
1549 if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1550 r = vmcoreinfo_max_size - vmcoreinfo_size;
1552 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1554 vmcoreinfo_size += r;
1558 * provide an empty default implementation here -- architecture
1559 * code may override this
1561 void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1564 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1566 return __pa((unsigned long)(char *)&vmcoreinfo_note);
1569 static int __init crash_save_vmcoreinfo_init(void)
1571 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1572 VMCOREINFO_PAGESIZE(PAGE_SIZE);
1574 VMCOREINFO_SYMBOL(init_uts_ns);
1575 VMCOREINFO_SYMBOL(node_online_map);
1577 VMCOREINFO_SYMBOL(swapper_pg_dir);
1579 VMCOREINFO_SYMBOL(_stext);
1580 VMCOREINFO_SYMBOL(vmap_area_list);
1582 #ifndef CONFIG_NEED_MULTIPLE_NODES
1583 VMCOREINFO_SYMBOL(mem_map);
1584 VMCOREINFO_SYMBOL(contig_page_data);
1586 #ifdef CONFIG_SPARSEMEM
1587 VMCOREINFO_SYMBOL(mem_section);
1588 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1589 VMCOREINFO_STRUCT_SIZE(mem_section);
1590 VMCOREINFO_OFFSET(mem_section, section_mem_map);
1592 VMCOREINFO_STRUCT_SIZE(page);
1593 VMCOREINFO_STRUCT_SIZE(pglist_data);
1594 VMCOREINFO_STRUCT_SIZE(zone);
1595 VMCOREINFO_STRUCT_SIZE(free_area);
1596 VMCOREINFO_STRUCT_SIZE(list_head);
1597 VMCOREINFO_SIZE(nodemask_t);
1598 VMCOREINFO_OFFSET(page, flags);
1599 VMCOREINFO_OFFSET(page, _count);
1600 VMCOREINFO_OFFSET(page, mapping);
1601 VMCOREINFO_OFFSET(page, lru);
1602 VMCOREINFO_OFFSET(page, _mapcount);
1603 VMCOREINFO_OFFSET(page, private);
1604 VMCOREINFO_OFFSET(pglist_data, node_zones);
1605 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1606 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1607 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1609 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1610 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1611 VMCOREINFO_OFFSET(pglist_data, node_id);
1612 VMCOREINFO_OFFSET(zone, free_area);
1613 VMCOREINFO_OFFSET(zone, vm_stat);
1614 VMCOREINFO_OFFSET(zone, spanned_pages);
1615 VMCOREINFO_OFFSET(free_area, free_list);
1616 VMCOREINFO_OFFSET(list_head, next);
1617 VMCOREINFO_OFFSET(list_head, prev);
1618 VMCOREINFO_OFFSET(vmap_area, va_start);
1619 VMCOREINFO_OFFSET(vmap_area, list);
1620 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1621 log_buf_kexec_setup();
1622 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1623 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1624 VMCOREINFO_NUMBER(PG_lru);
1625 VMCOREINFO_NUMBER(PG_private);
1626 VMCOREINFO_NUMBER(PG_swapcache);
1627 VMCOREINFO_NUMBER(PG_slab);
1628 #ifdef CONFIG_MEMORY_FAILURE
1629 VMCOREINFO_NUMBER(PG_hwpoison);
1631 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1633 arch_crash_save_vmcoreinfo();
1634 update_vmcoreinfo_note();
1639 module_init(crash_save_vmcoreinfo_init)
1642 * Move into place and start executing a preloaded standalone
1643 * executable. If nothing was preloaded return an error.
1645 int kernel_kexec(void)
1649 if (!mutex_trylock(&kexec_mutex))
1656 #ifdef CONFIG_KEXEC_JUMP
1657 if (kexec_image->preserve_context) {
1658 lock_system_sleep();
1659 pm_prepare_console();
1660 error = freeze_processes();
1663 goto Restore_console;
1666 error = dpm_suspend_start(PMSG_FREEZE);
1668 goto Resume_console;
1669 /* At this point, dpm_suspend_start() has been called,
1670 * but *not* dpm_suspend_end(). We *must* call
1671 * dpm_suspend_end() now. Otherwise, drivers for
1672 * some devices (e.g. interrupt controllers) become
1673 * desynchronized with the actual state of the
1674 * hardware at resume time, and evil weirdness ensues.
1676 error = dpm_suspend_end(PMSG_FREEZE);
1678 goto Resume_devices;
1679 error = disable_nonboot_cpus();
1682 local_irq_disable();
1683 error = syscore_suspend();
1689 kernel_restart_prepare(NULL);
1690 printk(KERN_EMERG "Starting new kernel\n");
1694 machine_kexec(kexec_image);
1696 #ifdef CONFIG_KEXEC_JUMP
1697 if (kexec_image->preserve_context) {
1702 enable_nonboot_cpus();
1703 dpm_resume_start(PMSG_RESTORE);
1705 dpm_resume_end(PMSG_RESTORE);
1710 pm_restore_console();
1711 unlock_system_sleep();
1716 mutex_unlock(&kexec_mutex);