/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <[email protected]>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/*
 * Declare these symbols weak so that if architecture provides a purgatory,
 * these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;

#ifdef CONFIG_KEXEC_FILE
static int kexec_calculate_store_digests(struct kimage *image);
#endif

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
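
/*
 * Illustrative sketch of that descriptor list (not code in this file; the
 * IND_* flags are the real ones from <linux/kexec.h>, the addresses are
 * made up).  Each kimage_entry_t is a physical address with flag bits in
 * the low bits, so a tiny two-page image might be encoded as:
 *
 *	entry[0] = dest_phys  | IND_DESTINATION;   set the copy cursor
 *	entry[1] = src_phys_a | IND_SOURCE;        copy, cursor += PAGE_SIZE
 *	entry[2] = src_phys_b | IND_SOURCE;
 *	entry[3] = IND_DONE;                       end of the list
 *
 * An IND_INDIRECTION entry points at the next page of entries, which is
 * how the list chains across page boundaries (see kimage_add_entry()).
 */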

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

static int sanity_check_segment_list(struct kimage *image)
{
	int result, i;
	unsigned long nr_segments = image->nr_segments;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return result;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return result;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return result;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return result;
	}

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		result = -EADDRNOTAVAIL;
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < crashk_res.start) ||
			    (mend > crashk_res.end))
				return result;
		}
	}

	return 0;
}
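
/*
 * A worked example of the overlap test above (illustrative values only):
 * half-open segments [0x1000, 0x3000) and [0x2000, 0x4000) are rejected
 * because (0x3000 > 0x2000) && (0x1000 < 0x4000), while back-to-back
 * segments [0x1000, 0x2000) and [0x2000, 0x3000) pass, since a segment's
 * end address is exclusive.
 */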

static struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

static void kimage_free_page_list(struct list_head *list);

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/* Enable the special crash kernel control page allocation policy. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

#ifdef CONFIG_KEXEC_FILE
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
	struct fd f = fdget(fd);
	int ret;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -EBADF;

	ret = vfs_getattr(&f.file->f_path, &stat);
	if (ret)
		goto out;

	if (stat.size > INT_MAX) {
		ret = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		ret = -EINVAL;
		goto out;
	}

	*buf = vmalloc(stat.size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(*buf);
			ret = bytes;
			goto out;
		}

		if (bytes == 0)
			break;
		pos += bytes;
	}

	if (pos != stat.size) {
		ret = -EBADF;
		vfree(*buf);
		goto out;
	}

	*buf_len = pos;
out:
	fdput(f);
	return ret;
}

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

void __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}

/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by kernel, initrd, and command line.  This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
static void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * Above call should have called into bootloader to free up
	 * any data stored in kimage->image_loader_data.  It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode the list of segments is prepared by the kernel.  Copy
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;

	ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
				&image->kernel_buf_len);
	if (ret)
		return ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);

	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
					&image->initrd_buf_len);
		if (ret)
			goto out;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
#endif /* CONFIG_KEXEC_FILE */

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
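
/*
 * Example of the retry loop above (illustrative addresses): if the
 * allocator first returns a page at 0x200000 while a segment is destined
 * for [0x200000, 0x300000), the page is parked on extra_pages and the
 * allocation retried; parking it keeps the allocator from handing back
 * the same conflicting page on the next pass, and all parked pages are
 * freed once a usable control page is found.
 */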

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
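
/*
 * Note on the rounding above: for a power-of-two size,
 * (addr + (size - 1)) & ~(size - 1) rounds addr up to the next
 * size-aligned boundary; e.g. with size = 0x4000, an addr of 0x12345
 * becomes 0x14000.  This keeps every candidate hole naturally aligned
 * to the allocation size.
 */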

struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if it's
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
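
/*
 * Chunking example for the copy loop above (illustrative numbers): with
 * maddr = 0x100ff0, PAGE_SIZE = 4096, mbytes = 0x2000 and ubytes = 0x1800,
 * the first pass copies mchunk = 0x10 bytes up to the page boundary,
 * later passes copy up to a page at a time, and once ubytes runs out the
 * tail of memsz simply stays as the cleared page contents.
 */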

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
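
/*
 * For orientation, a minimal and purely hypothetical user space invocation
 * might look like this (buffer names and the 0x100000 destination are
 * illustrative, not values this file defines):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_image,		// user buffer with the image
 *		.bufsz = kernel_image_len,
 *		.mem   = 0x100000,		// page-aligned destination
 *		.memsz = kernel_image_memsz,	// bufsz rounded up to a page
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */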
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

#ifdef CONFIG_KEXEC_FILE
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH)
		dest_image = &kexec_crash_image;

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region.  It is the same memory where an old crash kernel might
	 * be loaded.  Free any current crash dump kernel before we
	 * corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after image has been loaded
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
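
/*
 * Hypothetical user space sketch of the file based interface (paths and
 * field values illustrative only; cmdline_len counts the trailing NUL,
 * which kimage_file_prepare_segments() verifies):
 *
 *	int kfd = open("/boot/vmlinuz", O_RDONLY);
 *	int ifd = open("/boot/initrd.img", O_RDONLY);
 *	const char cmdline[] = "root=/dev/sda1";
 *
 *	syscall(__NR_kexec_file_load, kfd, ifd,
 *		sizeof(cmdline), cmdline, 0);
 */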

#endif /* CONFIG_KEXEC_FILE */

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
1555 | ||
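/*
 * A sketch of how this is reached (illustrative, assuming the standard
 * sysfs hook in kernel/ksysfs.c): writing a new size to
 * /sys/kernel/kexec_crash_size ends up in crash_shrink_memory(), e.g.
 *
 *	echo 67108864 > /sys/kernel/kexec_crash_size
 *
 * shrinks the reservation to 64M and hands the freed pages back to
 * System RAM via the ram_res resource inserted above.
 */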
85916f81 MD |
1556 | static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, |
1557 | size_t data_len) | |
1558 | { | |
1559 | struct elf_note note; | |
1560 | ||
1561 | note.n_namesz = strlen(name) + 1; | |
1562 | note.n_descsz = data_len; | |
1563 | note.n_type = type; | |
1564 | memcpy(buf, ¬e, sizeof(note)); | |
1565 | buf += (sizeof(note) + 3)/4; | |
1566 | memcpy(buf, name, note.n_namesz); | |
1567 | buf += (note.n_namesz + 3)/4; | |
1568 | memcpy(buf, data, note.n_descsz); | |
1569 | buf += (note.n_descsz + 3)/4; | |
1570 | ||
1571 | return buf; | |
1572 | } | |
1573 | ||
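/*
 * Illustrative layout produced by append_elf_note() (sizes assumed for
 * the sake of the example, not mandated by this file): for the name
 * "CORE" and a 336-byte prstatus,
 *
 *	n_namesz = 5, n_descsz = 336, n_type = NT_PRSTATUS
 *	header: 12 bytes  -> buf advances (12 + 3)/4  = 3 words
 *	name:    5 bytes  -> buf advances (5 + 3)/4   = 2 words
 *	desc:  336 bytes  -> buf advances (336 + 3)/4 = 84 words
 *
 * i.e. each field is padded up to a 4-byte boundary, as the ELF note
 * format requires.
 */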
1574 | static void final_note(u32 *buf) | |
1575 | { | |
1576 | struct elf_note note; | |
1577 | ||
1578 | note.n_namesz = 0; | |
1579 | note.n_descsz = 0; | |
1580 | note.n_type = 0; | |
1581 | memcpy(buf, ¬e, sizeof(note)); | |
1582 | } | |
1583 | ||
1584 | void crash_save_cpu(struct pt_regs *regs, int cpu) | |
1585 | { | |
1586 | struct elf_prstatus prstatus; | |
1587 | u32 *buf; | |
1588 | ||
4f4b6c1a | 1589 | if ((cpu < 0) || (cpu >= nr_cpu_ids)) |
85916f81 MD |
1590 | return; |
1591 | ||
1592 | /* Using ELF notes here is opportunistic. | |
1593 | * I need a well defined structure format | |
1594 | * for the data I pass, and I need tags | |
1595 | * on the data to indicate what information I have | |
1596 | * squirrelled away. ELF notes happen to provide | |
1597 | * all of that, so there is no need to invent something new. | |
1598 | */ | |
e1bebcf4 | 1599 | buf = (u32 *)per_cpu_ptr(crash_notes, cpu); |
85916f81 MD |
1600 | if (!buf) |
1601 | return; | |
1602 | memset(&prstatus, 0, sizeof(prstatus)); | |
1603 | prstatus.pr_pid = current->pid; | |
6cd61c0b | 1604 | elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); |
6672f76a | 1605 | buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, |
e1bebcf4 | 1606 | &prstatus, sizeof(prstatus)); |
85916f81 MD |
1607 | final_note(buf); |
1608 | } | |
1609 | ||
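/*
 * The per-cpu notes filled in by crash_save_cpu() are what the kdump
 * ELF core header points at; user space (e.g. kexec-tools) typically
 * reads their physical addresses from
 * /sys/devices/system/cpu/cpuN/crash_notes when building that header
 * (a sketch of the usual flow, not something enforced here).
 */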
cc571658 VG |
1610 | static int __init crash_notes_memory_init(void) |
1611 | { | |
1612 | /* Allocate memory for saving cpu registers. */ | |
1613 | crash_notes = alloc_percpu(note_buf_t); | |
1614 | if (!crash_notes) { | |
e1bebcf4 | 1615 | pr_warn("Kexec: Memory allocation for saving cpu register states failed\n"); |
cc571658 VG |
1616 | return -ENOMEM; |
1617 | } | |
1618 | return 0; | |
1619 | } | |
c96d6660 | 1620 | subsys_initcall(crash_notes_memory_init); |
fd59d231 | 1621 | |
cba63c30 BW |
1622 | |
1623 | /* | |
1624 | * Parsing of the "crashkernel" command line | |
1625 | * | |
1626 | * This code is intended to be called from architecture-specific code | |
1627 | */ | |
1628 | ||
1629 | ||
1630 | /* | |
1631 | * This function parses command lines in the format | |
1632 | * | |
1633 | * crashkernel=ramsize-range:size[,...][@offset] | |
1634 | * | |
1635 | * The function returns 0 on success and -EINVAL on failure. | |
1636 | */ | |
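/*
 * Example (illustrative values): with
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * a machine with 1G of RAM reserves 64M, a machine with 4G reserves
 * 128M, and in both cases the reservation is placed at offset 16M.
 */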
e1bebcf4 FF |
1637 | static int __init parse_crashkernel_mem(char *cmdline, |
1638 | unsigned long long system_ram, | |
1639 | unsigned long long *crash_size, | |
1640 | unsigned long long *crash_base) | |
cba63c30 BW |
1641 | { |
1642 | char *cur = cmdline, *tmp; | |
1643 | ||
1644 | /* for each entry of the comma-separated list */ | |
1645 | do { | |
1646 | unsigned long long start, end = ULLONG_MAX, size; | |
1647 | ||
1648 | /* get the start of the range */ | |
1649 | start = memparse(cur, &tmp); | |
1650 | if (cur == tmp) { | |
e1bebcf4 | 1651 | pr_warn("crashkernel: Memory value expected\n"); |
cba63c30 BW |
1652 | return -EINVAL; |
1653 | } | |
1654 | cur = tmp; | |
1655 | if (*cur != '-') { | |
e1bebcf4 | 1656 | pr_warn("crashkernel: '-' expected\n"); |
cba63c30 BW |
1657 | return -EINVAL; |
1658 | } | |
1659 | cur++; | |
1660 | ||
1661 | /* if no ':' is here, then we read the end */ | |
1662 | if (*cur != ':') { | |
1663 | end = memparse(cur, &tmp); | |
1664 | if (cur == tmp) { | |
e1bebcf4 | 1665 | pr_warn("crashkernel: Memory value expected\n"); |
cba63c30 BW |
1666 | return -EINVAL; |
1667 | } | |
1668 | cur = tmp; | |
1669 | if (end <= start) { | |
e1bebcf4 | 1670 | pr_warn("crashkernel: end <= start\n"); |
cba63c30 BW |
1671 | return -EINVAL; |
1672 | } | |
1673 | } | |
1674 | ||
1675 | if (*cur != ':') { | |
e1bebcf4 | 1676 | pr_warn("crashkernel: ':' expected\n"); |
cba63c30 BW |
1677 | return -EINVAL; |
1678 | } | |
1679 | cur++; | |
1680 | ||
1681 | size = memparse(cur, &tmp); | |
1682 | if (cur == tmp) { | |
e1bebcf4 | 1683 | pr_warn("Memory value expected\n"); |
cba63c30 BW |
1684 | return -EINVAL; |
1685 | } | |
1686 | cur = tmp; | |
1687 | if (size >= system_ram) { | |
e1bebcf4 | 1688 | pr_warn("crashkernel: invalid size\n"); |
cba63c30 BW |
1689 | return -EINVAL; |
1690 | } | |
1691 | ||
1692 | /* match? */ | |
be089d79 | 1693 | if (system_ram >= start && system_ram < end) { |
cba63c30 BW |
1694 | *crash_size = size; |
1695 | break; | |
1696 | } | |
1697 | } while (*cur++ == ','); | |
1698 | ||
1699 | if (*crash_size > 0) { | |
11c7da4b | 1700 | while (*cur && *cur != ' ' && *cur != '@') |
cba63c30 BW |
1701 | cur++; |
1702 | if (*cur == '@') { | |
1703 | cur++; | |
1704 | *crash_base = memparse(cur, &tmp); | |
1705 | if (cur == tmp) { | |
e1bebcf4 | 1706 | pr_warn("Memory value expected after '@'\n"); |
cba63c30 BW |
1707 | return -EINVAL; |
1708 | } | |
1709 | } | |
1710 | } | |
1711 | ||
1712 | return 0; | |
1713 | } | |
1714 | ||
1715 | /* | |
1716 | * This function parses "simple" (old) crashkernel command lines like | |
1717 | * | |
e1bebcf4 | 1718 | * crashkernel=size[@offset] |
cba63c30 BW |
1719 | * |
1720 | * It returns 0 on success and -EINVAL on failure. | |
1721 | */ | |
e1bebcf4 FF |
1722 | static int __init parse_crashkernel_simple(char *cmdline, |
1723 | unsigned long long *crash_size, | |
1724 | unsigned long long *crash_base) | |
cba63c30 BW |
1725 | { |
1726 | char *cur = cmdline; | |
1727 | ||
1728 | *crash_size = memparse(cmdline, &cur); | |
1729 | if (cmdline == cur) { | |
e1bebcf4 | 1730 | pr_warn("crashkernel: memory value expected\n"); |
cba63c30 BW |
1731 | return -EINVAL; |
1732 | } | |
1733 | ||
1734 | if (*cur == '@') | |
1735 | *crash_base = memparse(cur+1, &cur); | |
eaa3be6a | 1736 | else if (*cur != ' ' && *cur != '\0') { |
e1bebcf4 | 1737 | pr_warn("crashkernel: unrecognized char\n"); |
eaa3be6a ZD |
1738 | return -EINVAL; |
1739 | } | |
cba63c30 BW |
1740 | |
1741 | return 0; | |
1742 | } | |
1743 | ||
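/*
 * Example (illustrative): "crashkernel=128M@16M" yields
 * *crash_size = 128M and *crash_base = 16M; a plain "crashkernel=128M"
 * leaves *crash_base at 0, so the architecture picks the base itself.
 */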
adbc742b YL |
1744 | #define SUFFIX_HIGH 0 |
1745 | #define SUFFIX_LOW 1 | |
1746 | #define SUFFIX_NULL 2 | |
1747 | static __initdata char *suffix_tbl[] = { | |
1748 | [SUFFIX_HIGH] = ",high", | |
1749 | [SUFFIX_LOW] = ",low", | |
1750 | [SUFFIX_NULL] = NULL, | |
1751 | }; | |
1752 | ||
cba63c30 | 1753 | /* |
adbc742b YL |
1754 | * This function parses "suffix" crashkernel command lines like | |
1755 | * | |
1756 | * crashkernel=size,[high|low] | |
1757 | * | |
1758 | * It returns 0 on success and -EINVAL on failure. | |
cba63c30 | 1759 | */ |
adbc742b YL |
1760 | static int __init parse_crashkernel_suffix(char *cmdline, |
1761 | unsigned long long *crash_size, | |
1762 | unsigned long long *crash_base, | |
1763 | const char *suffix) | |
1764 | { | |
1765 | char *cur = cmdline; | |
1766 | ||
1767 | *crash_size = memparse(cmdline, &cur); | |
1768 | if (cmdline == cur) { | |
1769 | pr_warn("crashkernel: memory value expected\n"); | |
1770 | return -EINVAL; | |
1771 | } | |
1772 | ||
1773 | /* check the suffix */ | |
1774 | if (strncmp(cur, suffix, strlen(suffix))) { | |
1775 | pr_warn("crashkernel: unrecognized char\n"); | |
1776 | return -EINVAL; | |
1777 | } | |
1778 | cur += strlen(suffix); | |
1779 | if (*cur != ' ' && *cur != '\0') { | |
1780 | pr_warn("crashkernel: unrecognized char\n"); | |
1781 | return -EINVAL; | |
1782 | } | |
1783 | ||
1784 | return 0; | |
1785 | } | |
1786 | ||
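/*
 * Example (illustrative): with suffix ",high",
 * "crashkernel=256M,high" yields *crash_size = 256M; *crash_base is
 * not touched here, since placement is implied by the suffix.
 */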
1787 | static __init char *get_last_crashkernel(char *cmdline, | |
1788 | const char *name, | |
1789 | const char *suffix) | |
1790 | { | |
1791 | char *p = cmdline, *ck_cmdline = NULL; | |
1792 | ||
1793 | /* find crashkernel and use the last one if there is more than one */ | |
1794 | p = strstr(p, name); | |
1795 | while (p) { | |
1796 | char *end_p = strchr(p, ' '); | |
1797 | char *q; | |
1798 | ||
1799 | if (!end_p) | |
1800 | end_p = p + strlen(p); | |
1801 | ||
1802 | if (!suffix) { | |
1803 | int i; | |
1804 | ||
1805 | /* skip the one with any known suffix */ | |
1806 | for (i = 0; suffix_tbl[i]; i++) { | |
1807 | q = end_p - strlen(suffix_tbl[i]); | |
1808 | if (!strncmp(q, suffix_tbl[i], | |
1809 | strlen(suffix_tbl[i]))) | |
1810 | goto next; | |
1811 | } | |
1812 | ck_cmdline = p; | |
1813 | } else { | |
1814 | q = end_p - strlen(suffix); | |
1815 | if (!strncmp(q, suffix, strlen(suffix))) | |
1816 | ck_cmdline = p; | |
1817 | } | |
1818 | next: | |
1819 | p = strstr(p+1, name); | |
1820 | } | |
1821 | ||
1822 | if (!ck_cmdline) | |
1823 | return NULL; | |
1824 | ||
1825 | return ck_cmdline; | |
1826 | } | |
1827 | ||
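/*
 * Example (illustrative): on a command line containing
 *
 *	crashkernel=64M crashkernel=128M,high crashkernel=256M
 *
 * a lookup with suffix == NULL returns the "crashkernel=256M" entry
 * (the last plain one wins), while suffix == ",high" returns the
 * "crashkernel=128M,high" entry.
 */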
0212f915 | 1828 | static int __init __parse_crashkernel(char *cmdline, |
cba63c30 BW |
1829 | unsigned long long system_ram, |
1830 | unsigned long long *crash_size, | |
0212f915 | 1831 | unsigned long long *crash_base, |
adbc742b YL |
1832 | const char *name, |
1833 | const char *suffix) | |
cba63c30 | 1834 | { |
cba63c30 | 1835 | char *first_colon, *first_space; |
adbc742b | 1836 | char *ck_cmdline; |
cba63c30 BW |
1837 | |
1838 | BUG_ON(!crash_size || !crash_base); | |
1839 | *crash_size = 0; | |
1840 | *crash_base = 0; | |
1841 | ||
adbc742b | 1842 | ck_cmdline = get_last_crashkernel(cmdline, name, suffix); |
cba63c30 BW |
1843 | |
1844 | if (!ck_cmdline) | |
1845 | return -EINVAL; | |
1846 | ||
0212f915 | 1847 | ck_cmdline += strlen(name); |
cba63c30 | 1848 | |
adbc742b YL |
1849 | if (suffix) |
1850 | return parse_crashkernel_suffix(ck_cmdline, crash_size, | |
1851 | crash_base, suffix); | |
cba63c30 BW |
1852 | /* |
1853 | * if the commandline contains a ':', then that's the extended | |
1854 | * syntax -- if not, it must be the classic syntax | |
1855 | */ | |
1856 | first_colon = strchr(ck_cmdline, ':'); | |
1857 | first_space = strchr(ck_cmdline, ' '); | |
1858 | if (first_colon && (!first_space || first_colon < first_space)) | |
1859 | return parse_crashkernel_mem(ck_cmdline, system_ram, | |
1860 | crash_size, crash_base); | |
cba63c30 | 1861 | |
80c74f6a | 1862 | return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base); |
cba63c30 BW |
1863 | } |
1864 | ||
adbc742b YL |
1865 | /* |
1866 | * This function is the entry point for command-line parsing and should be | |
1867 | * called from the arch-specific code. | |
1868 | */ | |
0212f915 YL |
1869 | int __init parse_crashkernel(char *cmdline, |
1870 | unsigned long long system_ram, | |
1871 | unsigned long long *crash_size, | |
1872 | unsigned long long *crash_base) | |
1873 | { | |
1874 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | |
adbc742b | 1875 | "crashkernel=", NULL); |
0212f915 | 1876 | } |
55a20ee7 YL |
1877 | |
1878 | int __init parse_crashkernel_high(char *cmdline, | |
1879 | unsigned long long system_ram, | |
1880 | unsigned long long *crash_size, | |
1881 | unsigned long long *crash_base) | |
1882 | { | |
1883 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | |
adbc742b | 1884 | "crashkernel=", suffix_tbl[SUFFIX_HIGH]); |
55a20ee7 | 1885 | } |
0212f915 YL |
1886 | |
1887 | int __init parse_crashkernel_low(char *cmdline, | |
1888 | unsigned long long system_ram, | |
1889 | unsigned long long *crash_size, | |
1890 | unsigned long long *crash_base) | |
1891 | { | |
1892 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | |
adbc742b | 1893 | "crashkernel=", suffix_tbl[SUFFIX_LOW]); |
0212f915 | 1894 | } |
cba63c30 | 1895 | |
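/*
 * A minimal sketch of the expected arch-side call (illustrative;
 * boot_command_line and memblock_phys_mem_size() are the usual
 * arguments but are not mandated by this file):
 *
 *	unsigned long long crash_size, crash_base;
 *	int ret;
 *
 *	ret = parse_crashkernel(boot_command_line,
 *				memblock_phys_mem_size(),
 *				&crash_size, &crash_base);
 *	if (ret == 0 && crash_size > 0)
 *		... reserve [crash_base, crash_base + crash_size) ...
 */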
fa8ff292 | 1896 | static void update_vmcoreinfo_note(void) |
fd59d231 | 1897 | { |
fa8ff292 | 1898 | u32 *buf = vmcoreinfo_note; |
fd59d231 KO |
1899 | |
1900 | if (!vmcoreinfo_size) | |
1901 | return; | |
fd59d231 KO |
1902 | buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, |
1903 | vmcoreinfo_size); | |
fd59d231 KO |
1904 | final_note(buf); |
1905 | } | |
1906 | ||
fa8ff292 MH |
1907 | void crash_save_vmcoreinfo(void) |
1908 | { | |
63dca8d5 | 1909 | vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); |
fa8ff292 MH |
1910 | update_vmcoreinfo_note(); |
1911 | } | |
1912 | ||
fd59d231 KO |
1913 | void vmcoreinfo_append_str(const char *fmt, ...) |
1914 | { | |
1915 | va_list args; | |
1916 | char buf[0x50]; | |
310faaa9 | 1917 | size_t r; |
fd59d231 KO |
1918 | |
1919 | va_start(args, fmt); | |
a19428e5 | 1920 | r = vscnprintf(buf, sizeof(buf), fmt, args); |
fd59d231 KO |
1921 | va_end(args); |
1922 | ||
31c3a3fe | 1923 | r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); |
fd59d231 KO |
1924 | |
1925 | memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); | |
1926 | ||
1927 | vmcoreinfo_size += r; | |
1928 | } | |
1929 | ||
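/*
 * Example (illustrative): calls such as
 * vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE) accumulate lines
 * like "PAGESIZE=4096" in vmcoreinfo_data; anything beyond
 * vmcoreinfo_max_size is silently truncated by the min() above.
 */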
1930 | /* | |
1931 | * provide an empty default implementation here -- architecture | |
1932 | * code may override this | |
1933 | */ | |
52f5684c | 1934 | void __weak arch_crash_save_vmcoreinfo(void) |
fd59d231 KO |
1935 | {} |
1936 | ||
52f5684c | 1937 | unsigned long __weak paddr_vmcoreinfo_note(void) |
fd59d231 KO |
1938 | { |
1939 | return __pa((unsigned long)(char *)&vmcoreinfo_note); | |
1940 | } | |
1941 | ||
1942 | static int __init crash_save_vmcoreinfo_init(void) | |
1943 | { | |
bba1f603 KO |
1944 | VMCOREINFO_OSRELEASE(init_uts_ns.name.release); |
1945 | VMCOREINFO_PAGESIZE(PAGE_SIZE); | |
fd59d231 | 1946 | |
bcbba6c1 KO |
1947 | VMCOREINFO_SYMBOL(init_uts_ns); |
1948 | VMCOREINFO_SYMBOL(node_online_map); | |
d034cfab | 1949 | #ifdef CONFIG_MMU |
bcbba6c1 | 1950 | VMCOREINFO_SYMBOL(swapper_pg_dir); |
d034cfab | 1951 | #endif |
bcbba6c1 | 1952 | VMCOREINFO_SYMBOL(_stext); |
f1c4069e | 1953 | VMCOREINFO_SYMBOL(vmap_area_list); |
fd59d231 KO |
1954 | |
1955 | #ifndef CONFIG_NEED_MULTIPLE_NODES | |
bcbba6c1 KO |
1956 | VMCOREINFO_SYMBOL(mem_map); |
1957 | VMCOREINFO_SYMBOL(contig_page_data); | |
fd59d231 KO |
1958 | #endif |
1959 | #ifdef CONFIG_SPARSEMEM | |
bcbba6c1 KO |
1960 | VMCOREINFO_SYMBOL(mem_section); |
1961 | VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); | |
c76f860c | 1962 | VMCOREINFO_STRUCT_SIZE(mem_section); |
bcbba6c1 | 1963 | VMCOREINFO_OFFSET(mem_section, section_mem_map); |
fd59d231 | 1964 | #endif |
c76f860c KO |
1965 | VMCOREINFO_STRUCT_SIZE(page); |
1966 | VMCOREINFO_STRUCT_SIZE(pglist_data); | |
1967 | VMCOREINFO_STRUCT_SIZE(zone); | |
1968 | VMCOREINFO_STRUCT_SIZE(free_area); | |
1969 | VMCOREINFO_STRUCT_SIZE(list_head); | |
1970 | VMCOREINFO_SIZE(nodemask_t); | |
bcbba6c1 KO |
1971 | VMCOREINFO_OFFSET(page, flags); |
1972 | VMCOREINFO_OFFSET(page, _count); | |
1973 | VMCOREINFO_OFFSET(page, mapping); | |
1974 | VMCOREINFO_OFFSET(page, lru); | |
8d67091e AK |
1975 | VMCOREINFO_OFFSET(page, _mapcount); |
1976 | VMCOREINFO_OFFSET(page, private); | |
bcbba6c1 KO |
1977 | VMCOREINFO_OFFSET(pglist_data, node_zones); |
1978 | VMCOREINFO_OFFSET(pglist_data, nr_zones); | |
fd59d231 | 1979 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
bcbba6c1 | 1980 | VMCOREINFO_OFFSET(pglist_data, node_mem_map); |
fd59d231 | 1981 | #endif |
bcbba6c1 KO |
1982 | VMCOREINFO_OFFSET(pglist_data, node_start_pfn); |
1983 | VMCOREINFO_OFFSET(pglist_data, node_spanned_pages); | |
1984 | VMCOREINFO_OFFSET(pglist_data, node_id); | |
1985 | VMCOREINFO_OFFSET(zone, free_area); | |
1986 | VMCOREINFO_OFFSET(zone, vm_stat); | |
1987 | VMCOREINFO_OFFSET(zone, spanned_pages); | |
1988 | VMCOREINFO_OFFSET(free_area, free_list); | |
1989 | VMCOREINFO_OFFSET(list_head, next); | |
1990 | VMCOREINFO_OFFSET(list_head, prev); | |
13ba3fcb AK |
1991 | VMCOREINFO_OFFSET(vmap_area, va_start); |
1992 | VMCOREINFO_OFFSET(vmap_area, list); | |
bcbba6c1 | 1993 | VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); |
04d491ab | 1994 | log_buf_kexec_setup(); |
83a08e7c | 1995 | VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); |
bcbba6c1 | 1996 | VMCOREINFO_NUMBER(NR_FREE_PAGES); |
122c7a59 KO |
1997 | VMCOREINFO_NUMBER(PG_lru); |
1998 | VMCOREINFO_NUMBER(PG_private); | |
1999 | VMCOREINFO_NUMBER(PG_swapcache); | |
8d67091e | 2000 | VMCOREINFO_NUMBER(PG_slab); |
0d0bf667 MT |
2001 | #ifdef CONFIG_MEMORY_FAILURE |
2002 | VMCOREINFO_NUMBER(PG_hwpoison); | |
2003 | #endif | |
b3acc56b | 2004 | VMCOREINFO_NUMBER(PG_head_mask); |
8d67091e | 2005 | VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); |
3a1122d2 | 2006 | #ifdef CONFIG_HUGETLBFS |
8f1d26d0 | 2007 | VMCOREINFO_SYMBOL(free_huge_page); |
3a1122d2 | 2008 | #endif |
fd59d231 KO |
2009 | |
2010 | arch_crash_save_vmcoreinfo(); | |
fa8ff292 | 2011 | update_vmcoreinfo_note(); |
fd59d231 KO |
2012 | |
2013 | return 0; | |
2014 | } | |
2015 | ||
c96d6660 | 2016 | subsys_initcall(crash_save_vmcoreinfo_init); |
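/*
 * The note built here is ultimately consumed through /proc/vmcore in
 * the kdump kernel; tools such as makedumpfile use the symbols, sizes
 * and offsets recorded above to interpret the old kernel's memory (a
 * sketch of the usual consumers, not something this file enforces).
 */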
3ab83521 | 2017 | |
74ca317c | 2018 | #ifdef CONFIG_KEXEC_FILE |
cb105258 VG |
2019 | static int __kexec_add_segment(struct kimage *image, char *buf, |
2020 | unsigned long bufsz, unsigned long mem, | |
2021 | unsigned long memsz) | |
2022 | { | |
2023 | struct kexec_segment *ksegment; | |
2024 | ||
2025 | ksegment = &image->segment[image->nr_segments]; | |
2026 | ksegment->kbuf = buf; | |
2027 | ksegment->bufsz = bufsz; | |
2028 | ksegment->mem = mem; | |
2029 | ksegment->memsz = memsz; | |
2030 | image->nr_segments++; | |
2031 | ||
2032 | return 0; | |
2033 | } | |
2034 | ||
2035 | static int locate_mem_hole_top_down(unsigned long start, unsigned long end, | |
2036 | struct kexec_buf *kbuf) | |
2037 | { | |
2038 | struct kimage *image = kbuf->image; | |
2039 | unsigned long temp_start, temp_end; | |
2040 | ||
2041 | temp_end = min(end, kbuf->buf_max); | |
2042 | temp_start = temp_end - kbuf->memsz; | |
2043 | ||
2044 | do { | |
2045 | /* align down start */ | |
2046 | temp_start = temp_start & (~(kbuf->buf_align - 1)); | |
2047 | ||
2048 | if (temp_start < start || temp_start < kbuf->buf_min) | |
2049 | return 0; | |
2050 | ||
2051 | temp_end = temp_start + kbuf->memsz - 1; | |
2052 | ||
2053 | /* | |
2054 | * Make sure this does not conflict with any of existing | |
2055 | * segments | |
2056 | */ | |
2057 | if (kimage_is_destination_range(image, temp_start, temp_end)) { | |
2058 | temp_start = temp_start - PAGE_SIZE; | |
2059 | continue; | |
2060 | } | |
2061 | ||
2062 | /* We found a suitable memory range */ | |
2063 | break; | |
2064 | } while (1); | |
2065 | ||
2066 | /* If we are here, we found a suitable memory range */ | |
2067 | __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start, | |
2068 | kbuf->memsz); | |
2069 | ||
2070 | /* Success, stop navigating through remaining System RAM ranges */ | |
2071 | return 1; | |
2072 | } | |
2073 | ||
2074 | static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end, | |
2075 | struct kexec_buf *kbuf) | |
2076 | { | |
2077 | struct kimage *image = kbuf->image; | |
2078 | unsigned long temp_start, temp_end; | |
2079 | ||
2080 | temp_start = max(start, kbuf->buf_min); | |
2081 | ||
2082 | do { | |
2083 | temp_start = ALIGN(temp_start, kbuf->buf_align); | |
2084 | temp_end = temp_start + kbuf->memsz - 1; | |
2085 | ||
2086 | if (temp_end > end || temp_end > kbuf->buf_max) | |
2087 | return 0; | |
2088 | /* | |
2089 | * Make sure this does not conflict with any of existing | |
2090 | * segments | |
2091 | */ | |
2092 | if (kimage_is_destination_range(image, temp_start, temp_end)) { | |
2093 | temp_start = temp_start + PAGE_SIZE; | |
2094 | continue; | |
2095 | } | |
2096 | ||
2097 | /* We found a suitable memory range */ | |
2098 | break; | |
2099 | } while (1); | |
2100 | ||
2101 | /* If we are here, we found a suitable memory range */ | |
2102 | __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start, | |
2103 | kbuf->memsz); | |
2104 | ||
2105 | /* Success, stop navigating through remaining System RAM ranges */ | |
2106 | return 1; | |
2107 | } | |
2108 | ||
2109 | static int locate_mem_hole_callback(u64 start, u64 end, void *arg) | |
2110 | { | |
2111 | struct kexec_buf *kbuf = (struct kexec_buf *)arg; | |
2112 | unsigned long sz = end - start + 1; | |
2113 | ||
2114 | /* Returning 0 moves the walk on to the next memory range */ | |
2115 | if (sz < kbuf->memsz) | |
2116 | return 0; | |
2117 | ||
2118 | if (end < kbuf->buf_min || start > kbuf->buf_max) | |
2119 | return 0; | |
2120 | ||
2121 | /* | |
2122 | * Allocate memory top down within the RAM range if requested; | |
2123 | * otherwise allocate bottom up. | |
2124 | */ | |
2125 | if (kbuf->top_down) | |
2126 | return locate_mem_hole_top_down(start, end, kbuf); | |
2127 | return locate_mem_hole_bottom_up(start, end, kbuf); | |
2128 | } | |
2129 | ||
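/*
 * Worked example (illustrative numbers): for a 0x1000000-0x4ffffff
 * range with memsz = 0x200000 and buf_align = 0x100000, the top-down
 * path first tries temp_start = 0x4d00000 (0x4dfffff aligned down) and
 * walks downwards in PAGE_SIZE steps past any conflicting segment; the
 * bottom-up path starts at 0x1000000 and walks upwards instead.
 */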
2130 | /* | |
2131 | * Helper function for placing a buffer in a kexec segment. This assumes | |
2132 | * that kexec_mutex is held. | |
2133 | */ | |
2134 | int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, | |
2135 | unsigned long memsz, unsigned long buf_align, | |
2136 | unsigned long buf_min, unsigned long buf_max, | |
2137 | bool top_down, unsigned long *load_addr) | |
2138 | { | |
2139 | ||
2140 | struct kexec_segment *ksegment; | |
2141 | struct kexec_buf buf, *kbuf; | |
2142 | int ret; | |
2143 | ||
2144 | /* Currently, adding a segment this way is allowed only in file mode */ | |
2145 | if (!image->file_mode) | |
2146 | return -EINVAL; | |
2147 | ||
2148 | if (image->nr_segments >= KEXEC_SEGMENT_MAX) | |
2149 | return -EINVAL; | |
2150 | ||
2151 | /* | |
2152 | * Make sure we are not trying to add a buffer after allocating | |
2153 | * control pages. All segments need to be placed before any | |
2154 | * control pages are allocated, because the control page | |
2155 | * allocation logic goes through the list of segments to make | |
2156 | * sure there are no destination overlaps. | |
2157 | */ | |
2158 | if (!list_empty(&image->control_pages)) { | |
2159 | WARN_ON(1); | |
2160 | return -EINVAL; | |
2161 | } | |
2162 | ||
2163 | memset(&buf, 0, sizeof(struct kexec_buf)); | |
2164 | kbuf = &buf; | |
2165 | kbuf->image = image; | |
2166 | kbuf->buffer = buffer; | |
2167 | kbuf->bufsz = bufsz; | |
2168 | ||
2169 | kbuf->memsz = ALIGN(memsz, PAGE_SIZE); | |
2170 | kbuf->buf_align = max(buf_align, PAGE_SIZE); | |
2171 | kbuf->buf_min = buf_min; | |
2172 | kbuf->buf_max = buf_max; | |
2173 | kbuf->top_down = top_down; | |
2174 | ||
2175 | /* Walk the RAM ranges and allocate a suitable range for the buffer */ | |
dd5f7260 VG |
2176 | if (image->type == KEXEC_TYPE_CRASH) |
2177 | ret = walk_iomem_res("Crash kernel", | |
2178 | IORESOURCE_MEM | IORESOURCE_BUSY, | |
2179 | crashk_res.start, crashk_res.end, kbuf, | |
2180 | locate_mem_hole_callback); | |
2181 | else | |
2182 | ret = walk_system_ram_res(0, -1, kbuf, | |
2183 | locate_mem_hole_callback); | |
cb105258 VG |
2184 | if (ret != 1) { |
2185 | /* A suitable memory range could not be found for buffer */ | |
2186 | return -EADDRNOTAVAIL; | |
2187 | } | |
2188 | ||
2189 | /* Found a suitable memory range */ | |
2190 | ksegment = &image->segment[image->nr_segments - 1]; | |
2191 | *load_addr = ksegment->mem; | |
2192 | return 0; | |
2193 | } | |
2194 | ||
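/*
 * Illustrative call, mirroring __kexec_load_purgatory() below (the
 * actual min/max/top_down values are the caller's policy):
 *
 *	unsigned long load_addr;
 *
 *	ret = kexec_add_buffer(image, buf, bufsz, memsz, PAGE_SIZE,
 *			       crashk_res.start, crashk_res.end,
 *			       true, &load_addr);
 */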
12db5562 VG |
2195 | /* Calculate and store the digest of segments */ |
2196 | static int kexec_calculate_store_digests(struct kimage *image) | |
2197 | { | |
2198 | struct crypto_shash *tfm; | |
2199 | struct shash_desc *desc; | |
2200 | int ret = 0, i, j, zero_buf_sz, sha_region_sz; | |
2201 | size_t desc_size, nullsz; | |
2202 | char *digest; | |
2203 | void *zero_buf; | |
2204 | struct kexec_sha_region *sha_regions; | |
2205 | struct purgatory_info *pi = &image->purgatory_info; | |
2206 | ||
2207 | zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); | |
2208 | zero_buf_sz = PAGE_SIZE; | |
2209 | ||
2210 | tfm = crypto_alloc_shash("sha256", 0, 0); | |
2211 | if (IS_ERR(tfm)) { | |
2212 | ret = PTR_ERR(tfm); | |
2213 | goto out; | |
2214 | } | |
2215 | ||
2216 | desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); | |
2217 | desc = kzalloc(desc_size, GFP_KERNEL); | |
2218 | if (!desc) { | |
2219 | ret = -ENOMEM; | |
2220 | goto out_free_tfm; | |
2221 | } | |
2222 | ||
2223 | sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); | |
2224 | sha_regions = vzalloc(sha_region_sz); | |
2225 | if (!sha_regions) { | |
2226 | ret = -ENOMEM; | |
2226 | goto out_free_desc; | |
2226 | } | |
2227 | ||
2228 | desc->tfm = tfm; | |
2229 | desc->flags = 0; | |
2230 | ||
2231 | ret = crypto_shash_init(desc); | |
2232 | if (ret < 0) | |
2233 | goto out_free_sha_regions; | |
2234 | ||
2235 | digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); | |
2236 | if (!digest) { | |
2237 | ret = -ENOMEM; | |
2238 | goto out_free_sha_regions; | |
2239 | } | |
2240 | ||
2241 | for (j = i = 0; i < image->nr_segments; i++) { | |
2242 | struct kexec_segment *ksegment; | |
2243 | ||
2244 | ksegment = &image->segment[i]; | |
2245 | /* | |
2246 | * Skip purgatory as it will be modified once we put digest | |
2247 | * info in purgatory. | |
2248 | */ | |
2249 | if (ksegment->kbuf == pi->purgatory_buf) | |
2250 | continue; | |
2251 | ||
2252 | ret = crypto_shash_update(desc, ksegment->kbuf, | |
2253 | ksegment->bufsz); | |
2254 | if (ret) | |
2255 | break; | |
2256 | ||
2257 | /* | |
2258 | * Assume the rest of the buffer is filled with zeroes and | |
2259 | * update digest accordingly. | |
2260 | */ | |
2261 | nullsz = ksegment->memsz - ksegment->bufsz; | |
2262 | while (nullsz) { | |
2263 | unsigned long bytes = nullsz; | |
2264 | ||
2265 | if (bytes > zero_buf_sz) | |
2266 | bytes = zero_buf_sz; | |
2267 | ret = crypto_shash_update(desc, zero_buf, bytes); | |
2268 | if (ret) | |
2269 | break; | |
2270 | nullsz -= bytes; | |
2271 | } | |
2272 | ||
2273 | if (ret) | |
2274 | break; | |
2275 | ||
2276 | sha_regions[j].start = ksegment->mem; | |
2277 | sha_regions[j].len = ksegment->memsz; | |
2278 | j++; | |
2279 | } | |
2280 | ||
2281 | if (!ret) { | |
2282 | ret = crypto_shash_final(desc, digest); | |
2283 | if (ret) | |
2284 | goto out_free_digest; | |
2285 | ret = kexec_purgatory_get_set_symbol(image, "sha_regions", | |
2286 | sha_regions, sha_region_sz, 0); | |
2287 | if (ret) | |
2288 | goto out_free_digest; | |
2289 | ||
2290 | ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", | |
2291 | digest, SHA256_DIGEST_SIZE, 0); | |
2292 | if (ret) | |
2293 | goto out_free_digest; | |
2294 | } | |
2295 | ||
2296 | out_free_digest: | |
2297 | kfree(digest); | |
2298 | out_free_sha_regions: | |
2299 | vfree(sha_regions); | |
2300 | out_free_desc: | |
2301 | kfree(desc); | |
2302 | out_free_tfm: | |
2303 | crypto_free_shash(tfm); | |
2304 | out: | |
2305 | return ret; | |
2306 | } | |
2307 | ||
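/*
 * The "sha_regions" and "sha256_digest" symbols set above live inside
 * the purgatory blob; at reboot time purgatory is expected to rehash
 * those regions and compare against the stored digest before jumping
 * to the new kernel (a sketch of the x86 purgatory's behaviour; other
 * purgatories may differ).
 */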
2308 | /* Actually load purgatory. A lot of this code is taken from kexec-tools. */ | |
2309 | static int __kexec_load_purgatory(struct kimage *image, unsigned long min, | |
2310 | unsigned long max, int top_down) | |
2311 | { | |
2312 | struct purgatory_info *pi = &image->purgatory_info; | |
2313 | unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad; | |
2314 | unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset; | |
2315 | unsigned char *buf_addr, *src; | |
2316 | int i, ret = 0, entry_sidx = -1; | |
2317 | const Elf_Shdr *sechdrs_c; | |
2318 | Elf_Shdr *sechdrs = NULL; | |
2319 | void *purgatory_buf = NULL; | |
2320 | ||
2321 | /* | |
2322 | * sechdrs_c points to the section headers in purgatory, which are | |
2323 | * read-only. No modifications allowed. | |
2324 | */ | |
2325 | sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff; | |
2326 | ||
2327 | /* | |
2328 | * We cannot modify sechdrs_c[] or its fields; they are read-only. | |
2329 | * Copy it over to a local copy where one can store some temporary | |
2330 | * data and free it at the end. We need to modify ->sh_addr and | |
2331 | * ->sh_offset fields to keep track of permanent and temporary | |
2332 | * locations of sections. | |
2333 | */ | |
2334 | sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr)); | |
2335 | if (!sechdrs) | |
2336 | return -ENOMEM; | |
2337 | ||
2338 | memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr)); | |
2339 | ||
2340 | /* | |
2341 | * We end up with multiple copies of the sections. The first copy is | |
2342 | * the one embedded in the kernel, in a read-only section. Some of | |
2343 | * these sections will be copied to a temporary buffer and relocated, | |
2344 | * and those sections will finally be copied to their final | |
2345 | * destination at segment load time. | |
2346 | * | |
2347 | * Use ->sh_offset to reflect section address in memory. It will | |
2348 | * point to original read only copy if section is not allocatable. | |
2349 | * Otherwise it will point to temporary copy which will be relocated. | |
2350 | * | |
2351 | * Use ->sh_addr to contain final address of the section where it | |
2352 | * will go during execution time. | |
2353 | */ | |
2354 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2355 | if (sechdrs[i].sh_type == SHT_NOBITS) | |
2356 | continue; | |
2357 | ||
2358 | sechdrs[i].sh_offset = (unsigned long)pi->ehdr + | |
2359 | sechdrs[i].sh_offset; | |
2360 | } | |
2361 | ||
2362 | /* | |
2363 | * Identify entry point section and make entry relative to section | |
2364 | * start. | |
2365 | */ | |
2366 | entry = pi->ehdr->e_entry; | |
2367 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2368 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | |
2369 | continue; | |
2370 | ||
2371 | if (!(sechdrs[i].sh_flags & SHF_EXECINSTR)) | |
2372 | continue; | |
2373 | ||
2374 | /* Make entry section relative */ | |
2375 | if (sechdrs[i].sh_addr <= pi->ehdr->e_entry && | |
2376 | ((sechdrs[i].sh_addr + sechdrs[i].sh_size) > | |
2377 | pi->ehdr->e_entry)) { | |
2378 | entry_sidx = i; | |
2379 | entry -= sechdrs[i].sh_addr; | |
2380 | break; | |
2381 | } | |
2382 | } | |
2383 | ||
2384 | /* Determine how much memory is needed to load relocatable object. */ | |
2385 | buf_align = 1; | |
2386 | bss_align = 1; | |
2387 | buf_sz = 0; | |
2388 | bss_sz = 0; | |
2389 | ||
2390 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2391 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | |
2392 | continue; | |
2393 | ||
2394 | align = sechdrs[i].sh_addralign; | |
2395 | if (sechdrs[i].sh_type != SHT_NOBITS) { | |
2396 | if (buf_align < align) | |
2397 | buf_align = align; | |
2398 | buf_sz = ALIGN(buf_sz, align); | |
2399 | buf_sz += sechdrs[i].sh_size; | |
2400 | } else { | |
2401 | /* bss section */ | |
2402 | if (bss_align < align) | |
2403 | bss_align = align; | |
2404 | bss_sz = ALIGN(bss_sz, align); | |
2405 | bss_sz += sechdrs[i].sh_size; | |
2406 | } | |
2407 | } | |
2408 | ||
2409 | /* Determine the bss padding required to align bss properly */ | |
2410 | bss_pad = 0; | |
2411 | if (buf_sz & (bss_align - 1)) | |
2412 | bss_pad = bss_align - (buf_sz & (bss_align - 1)); | |
2413 | ||
2414 | memsz = buf_sz + bss_pad + bss_sz; | |
2415 | ||
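	/*
	 * Resulting layout of the purgatory segment (illustrative):
	 *
	 *	load_addr             load_addr + buf_sz
	 *	|  progbits sections  |  bss_pad  |  bss (SHT_NOBITS)  |
	 *	                                  ^-- bss_addr
	 */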
2416 | /* Allocate buffer for purgatory */ | |
2417 | purgatory_buf = vzalloc(buf_sz); | |
2418 | if (!purgatory_buf) { | |
2419 | ret = -ENOMEM; | |
2420 | goto out; | |
2421 | } | |
2422 | ||
2423 | if (buf_align < bss_align) | |
2424 | buf_align = bss_align; | |
2425 | ||
2426 | /* Add buffer to segment list */ | |
2427 | ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz, | |
2428 | buf_align, min, max, top_down, | |
2429 | &pi->purgatory_load_addr); | |
2430 | if (ret) | |
2431 | goto out; | |
2432 | ||
2433 | /* Load SHF_ALLOC sections */ | |
2434 | buf_addr = purgatory_buf; | |
2435 | load_addr = curr_load_addr = pi->purgatory_load_addr; | |
2436 | bss_addr = load_addr + buf_sz + bss_pad; | |
2437 | ||
2438 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2439 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | |
2440 | continue; | |
2441 | ||
2442 | align = sechdrs[i].sh_addralign; | |
2443 | if (sechdrs[i].sh_type != SHT_NOBITS) { | |
2444 | curr_load_addr = ALIGN(curr_load_addr, align); | |
2445 | offset = curr_load_addr - load_addr; | |
2446 | /* We already modified ->sh_offset to hold the source address */ | |
2447 | src = (char *) sechdrs[i].sh_offset; | |
2448 | memcpy(buf_addr + offset, src, sechdrs[i].sh_size); | |
2449 | ||
2450 | /* Store load address and source address of section */ | |
2451 | sechdrs[i].sh_addr = curr_load_addr; | |
2452 | ||
2453 | /* | |
2454 | * This section got copied to temporary buffer. Update | |
2455 | * ->sh_offset accordingly. | |
2456 | */ | |
2457 | sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset); | |
2458 | ||
2459 | /* Advance to the next address */ | |
2460 | curr_load_addr += sechdrs[i].sh_size; | |
2461 | } else { | |
2462 | bss_addr = ALIGN(bss_addr, align); | |
2463 | sechdrs[i].sh_addr = bss_addr; | |
2464 | bss_addr += sechdrs[i].sh_size; | |
2465 | } | |
2466 | } | |
2467 | ||
2468 | /* Update entry point based on load address of text section */ | |
2469 | if (entry_sidx >= 0) | |
2470 | entry += sechdrs[entry_sidx].sh_addr; | |
2471 | ||
2472 | /* Make kernel jump to purgatory after shutdown */ | |
2473 | image->start = entry; | |
2474 | ||
2475 | /* Used later to get/set symbol values */ | |
2476 | pi->sechdrs = sechdrs; | |
2477 | ||
2478 | /* | |
2479 | * Used later to identify which section is purgatory and skip it | |
2480 | * from checksumming. | |
2481 | */ | |
2482 | pi->purgatory_buf = purgatory_buf; | |
2483 | return ret; | |
2484 | out: | |
2485 | vfree(sechdrs); | |
2486 | vfree(purgatory_buf); | |
2487 | return ret; | |
2488 | } | |
2489 | ||
2490 | static int kexec_apply_relocations(struct kimage *image) | |
2491 | { | |
2492 | int i, ret; | |
2493 | struct purgatory_info *pi = &image->purgatory_info; | |
2494 | Elf_Shdr *sechdrs = pi->sechdrs; | |
2495 | ||
2496 | /* Apply relocations */ | |
2497 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2498 | Elf_Shdr *section, *symtab; | |
2499 | ||
2500 | if (sechdrs[i].sh_type != SHT_RELA && | |
2501 | sechdrs[i].sh_type != SHT_REL) | |
2502 | continue; | |
2503 | ||
2504 | /* | |
2505 | * For sections of type SHT_RELA/SHT_REL, | |
2506 | * ->sh_link contains the section header index of the associated | |
2507 | * symbol table, and ->sh_info contains the section header | |
2508 | * index of the section to which the relocations apply. | |
2509 | */ | |
2510 | if (sechdrs[i].sh_info >= pi->ehdr->e_shnum || | |
2511 | sechdrs[i].sh_link >= pi->ehdr->e_shnum) | |
2512 | return -ENOEXEC; | |
2513 | ||
2514 | section = &sechdrs[sechdrs[i].sh_info]; | |
2515 | symtab = &sechdrs[sechdrs[i].sh_link]; | |
2516 | ||
2517 | if (!(section->sh_flags & SHF_ALLOC)) | |
2518 | continue; | |
2519 | ||
2520 | /* | |
2521 | * symtab->sh_link contains the section header index of the associated | |
2522 | * string table. | |
2523 | */ | |
2524 | if (symtab->sh_link >= pi->ehdr->e_shnum) | |
2525 | /* Invalid section number? */ | |
2526 | continue; | |
2527 | ||
2528 | /* | |
2529 | * The respective architecture needs to provide support for applying | |
2530 | * relocations of type SHT_RELA/SHT_REL. | |
2531 | */ | |
2532 | if (sechdrs[i].sh_type == SHT_RELA) | |
2533 | ret = arch_kexec_apply_relocations_add(pi->ehdr, | |
2534 | sechdrs, i); | |
2535 | else if (sechdrs[i].sh_type == SHT_REL) | |
2536 | ret = arch_kexec_apply_relocations(pi->ehdr, | |
2537 | sechdrs, i); | |
2538 | if (ret) | |
2539 | return ret; | |
2540 | } | |
2541 | ||
2542 | return 0; | |
2543 | } | |
2544 | ||
2545 | /* Load relocatable purgatory object and relocate it appropriately */ | |
2546 | int kexec_load_purgatory(struct kimage *image, unsigned long min, | |
2547 | unsigned long max, int top_down, | |
2548 | unsigned long *load_addr) | |
2549 | { | |
2550 | struct purgatory_info *pi = &image->purgatory_info; | |
2551 | int ret; | |
2552 | ||
2553 | if (kexec_purgatory_size <= 0) | |
2554 | return -EINVAL; | |
2555 | ||
2556 | if (kexec_purgatory_size < sizeof(Elf_Ehdr)) | |
2557 | return -ENOEXEC; | |
2558 | ||
2559 | pi->ehdr = (Elf_Ehdr *)kexec_purgatory; | |
2560 | ||
2561 | if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0 | |
2562 | || pi->ehdr->e_type != ET_REL | |
2563 | || !elf_check_arch(pi->ehdr) | |
2564 | || pi->ehdr->e_shentsize != sizeof(Elf_Shdr)) | |
2565 | return -ENOEXEC; | |
2566 | ||
2567 | if (pi->ehdr->e_shoff >= kexec_purgatory_size | |
2568 | || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) > | |
2569 | kexec_purgatory_size - pi->ehdr->e_shoff)) | |
2570 | return -ENOEXEC; | |
2571 | ||
2572 | ret = __kexec_load_purgatory(image, min, max, top_down); | |
2573 | if (ret) | |
2574 | return ret; | |
2575 | ||
2576 | ret = kexec_apply_relocations(image); | |
2577 | if (ret) | |
2578 | goto out; | |
2579 | ||
2580 | *load_addr = pi->purgatory_load_addr; | |
2581 | return 0; | |
2582 | out: | |
2583 | vfree(pi->sechdrs); | |
2584 | vfree(pi->purgatory_buf); | |
2585 | return ret; | |
2586 | } | |
2587 | ||
2588 | static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, | |
2589 | const char *name) | |
2590 | { | |
2591 | Elf_Sym *syms; | |
2592 | Elf_Shdr *sechdrs; | |
2593 | Elf_Ehdr *ehdr; | |
2594 | int i, k; | |
2595 | const char *strtab; | |
2596 | ||
2597 | if (!pi->sechdrs || !pi->ehdr) | |
2598 | return NULL; | |
2599 | ||
2600 | sechdrs = pi->sechdrs; | |
2601 | ehdr = pi->ehdr; | |
2602 | ||
2603 | for (i = 0; i < ehdr->e_shnum; i++) { | |
2604 | if (sechdrs[i].sh_type != SHT_SYMTAB) | |
2605 | continue; | |
2606 | ||
2607 | if (sechdrs[i].sh_link >= ehdr->e_shnum) | |
2608 | /* Invalid strtab section number */ | |
2609 | continue; | |
2610 | strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset; | |
2611 | syms = (Elf_Sym *)sechdrs[i].sh_offset; | |
2612 | ||
2613 | /* Go through symbols for a match */ | |
2614 | for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) { | |
2615 | if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL) | |
2616 | continue; | |
2617 | ||
2618 | if (strcmp(strtab + syms[k].st_name, name) != 0) | |
2619 | continue; | |
2620 | ||
2621 | if (syms[k].st_shndx == SHN_UNDEF || | |
2622 | syms[k].st_shndx >= ehdr->e_shnum) { | |
2623 | pr_debug("Symbol: %s has bad section index %d.\n", | |
2624 | name, syms[k].st_shndx); | |
2625 | return NULL; | |
2626 | } | |
2627 | ||
2628 | /* Found the symbol we are looking for */ | |
2629 | return &syms[k]; | |
2630 | } | |
2631 | } | |
2632 | ||
2633 | return NULL; | |
2634 | } | |
2635 | ||
2636 | void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) | |
2637 | { | |
2638 | struct purgatory_info *pi = &image->purgatory_info; | |
2639 | Elf_Sym *sym; | |
2640 | Elf_Shdr *sechdr; | |
2641 | ||
2642 | sym = kexec_purgatory_find_symbol(pi, name); | |
2643 | if (!sym) | |
2644 | return ERR_PTR(-EINVAL); | |
2645 | ||
2646 | sechdr = &pi->sechdrs[sym->st_shndx]; | |
2647 | ||
2648 | /* | |
2649 | * Returns the address where the symbol will finally be loaded after | |
2650 | * kexec_load_segment() | |
2651 | */ | |
2652 | return (void *)(sechdr->sh_addr + sym->st_value); | |
2653 | } | |
2654 | ||
2655 | /* | |
2656 | * Get or set the value of a symbol. If "get_value" is true, the symbol | |
2657 | * value is returned in buf; otherwise it is set from the value in buf. | |
2658 | */ | |
2659 | int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, | |
2660 | void *buf, unsigned int size, bool get_value) | |
2661 | { | |
2662 | Elf_Sym *sym; | |
2663 | Elf_Shdr *sechdrs; | |
2664 | struct purgatory_info *pi = &image->purgatory_info; | |
2665 | char *sym_buf; | |
2666 | ||
2667 | sym = kexec_purgatory_find_symbol(pi, name); | |
2668 | if (!sym) | |
2669 | return -EINVAL; | |
2670 | ||
2671 | if (sym->st_size != size) { | |
2672 | pr_err("symbol %s size mismatch: expected %lu actual %u\n", | |
2673 | name, (unsigned long)sym->st_size, size); | |
2674 | return -EINVAL; | |
2675 | } | |
2676 | ||
2677 | sechdrs = pi->sechdrs; | |
2678 | ||
2679 | if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { | |
2680 | pr_err("symbol %s is in a bss section. Cannot %s\n", name, | |
2681 | get_value ? "get" : "set"); | |
2682 | return -EINVAL; | |
2683 | } | |
2684 | ||
2685 | sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset + | |
2686 | sym->st_value; | |
2687 | ||
2688 | if (get_value) | |
2689 | memcpy((void *)buf, sym_buf, size); | |
2690 | else | |
2691 | memcpy((void *)sym_buf, buf, size); | |
2692 | ||
2693 | return 0; | |
2694 | } | |
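/*
 * Illustrative use, as in kexec_calculate_store_digests() above:
 *
 *	ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
 *					     digest, SHA256_DIGEST_SIZE, 0);
 *
 * writes the computed digest into purgatory's copy of the symbol; the
 * same helper with get_value == true would read it back instead.
 */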
74ca317c | 2695 | #endif /* CONFIG_KEXEC_FILE */ |
cb105258 | 2696 | |
7ade3fcc YH |
2697 | /* |
2698 | * Move into place and start executing a preloaded standalone | |
2699 | * executable. If nothing was preloaded, return an error. | |
3ab83521 YH |
2700 | */ |
2701 | int kernel_kexec(void) | |
2702 | { | |
2703 | int error = 0; | |
2704 | ||
8c5a1cf0 | 2705 | if (!mutex_trylock(&kexec_mutex)) |
3ab83521 YH |
2706 | return -EBUSY; |
2707 | if (!kexec_image) { | |
2708 | error = -EINVAL; | |
2709 | goto Unlock; | |
2710 | } | |
2711 | ||
3ab83521 | 2712 | #ifdef CONFIG_KEXEC_JUMP |
7ade3fcc | 2713 | if (kexec_image->preserve_context) { |
bcda53fa | 2714 | lock_system_sleep(); |
89081d17 YH |
2715 | pm_prepare_console(); |
2716 | error = freeze_processes(); | |
2717 | if (error) { | |
2718 | error = -EBUSY; | |
2719 | goto Restore_console; | |
2720 | } | |
2721 | suspend_console(); | |
d1616302 | 2722 | error = dpm_suspend_start(PMSG_FREEZE); |
89081d17 YH |
2723 | if (error) |
2724 | goto Resume_console; | |
d1616302 | 2725 | /* At this point, dpm_suspend_start() has been called, |
cf579dfb RW |
2726 | * but *not* dpm_suspend_end(). We *must* call |
2727 | * dpm_suspend_end() now. Otherwise, drivers for | |
89081d17 YH |
2728 | * some devices (e.g. interrupt controllers) become |
2729 | * desynchronized with the actual state of the | |
2730 | * hardware at resume time, and evil weirdness ensues. | |
2731 | */ | |
cf579dfb | 2732 | error = dpm_suspend_end(PMSG_FREEZE); |
89081d17 | 2733 | if (error) |
749b0afc RW |
2734 | goto Resume_devices; |
2735 | error = disable_nonboot_cpus(); | |
2736 | if (error) | |
2737 | goto Enable_cpus; | |
2ed8d2b3 | 2738 | local_irq_disable(); |
2e711c04 | 2739 | error = syscore_suspend(); |
770824bd | 2740 | if (error) |
749b0afc | 2741 | goto Enable_irqs; |
7ade3fcc | 2742 | } else |
3ab83521 | 2743 | #endif |
7ade3fcc | 2744 | { |
4fc9bbf9 | 2745 | kexec_in_progress = true; |
ca195b7f | 2746 | kernel_restart_prepare(NULL); |
c97102ba | 2747 | migrate_to_reboot_cpu(); |
011e4b02 SB |
2748 | |
2749 | /* | |
2750 | * migrate_to_reboot_cpu() disables CPU hotplug assuming that | |
2751 | * no further code needs to use CPU hotplug (which is true in | |
2752 | * the reboot case). However, the kexec path depends on using | |
2753 | * CPU hotplug again; so re-enable it here. | |
2754 | */ | |
2755 | cpu_hotplug_enable(); | |
e1bebcf4 | 2756 | pr_emerg("Starting new kernel\n"); |
3ab83521 YH |
2757 | machine_shutdown(); |
2758 | } | |
2759 | ||
2760 | machine_kexec(kexec_image); | |
2761 | ||
3ab83521 | 2762 | #ifdef CONFIG_KEXEC_JUMP |
7ade3fcc | 2763 | if (kexec_image->preserve_context) { |
19234c08 | 2764 | syscore_resume(); |
749b0afc | 2765 | Enable_irqs: |
3ab83521 | 2766 | local_irq_enable(); |
749b0afc | 2767 | Enable_cpus: |
89081d17 | 2768 | enable_nonboot_cpus(); |
cf579dfb | 2769 | dpm_resume_start(PMSG_RESTORE); |
89081d17 | 2770 | Resume_devices: |
d1616302 | 2771 | dpm_resume_end(PMSG_RESTORE); |
89081d17 YH |
2772 | Resume_console: |
2773 | resume_console(); | |
2774 | thaw_processes(); | |
2775 | Restore_console: | |
2776 | pm_restore_console(); | |
bcda53fa | 2777 | unlock_system_sleep(); |
3ab83521 | 2778 | } |
7ade3fcc | 2779 | #endif |
3ab83521 YH |
2780 | |
2781 | Unlock: | |
8c5a1cf0 | 2782 | mutex_unlock(&kexec_mutex); |
3ab83521 YH |
2783 | return error; |
2784 | } |
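
/*
 * kernel_kexec() is reached from sys_reboot() with
 * LINUX_REBOOT_CMD_KEXEC. A typical (illustrative) user-space sequence
 * using kexec-tools:
 *
 *	kexec -l /boot/vmlinuz --initrd=/boot/initrd --reuse-cmdline
 *	kexec -e	# issues reboot(LINUX_REBOOT_CMD_KEXEC)
 */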