/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <[email protected]>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
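
/*
 * Decide whether an oops in task @p should fire the loaded crash
 * kernel: any oops in interrupt context, in the idle task (pid 0),
 * in init (pid 1), or with panic_on_oops set is treated as fatal.
 */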
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || p->pid == 1 || panic_on_oops)
                return 1;
        return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size is given
 * by KEXEC_CONTROL_CODE_SIZE. In the best case only a single page
 * of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
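
/*
 * An illustrative sketch of that descriptor list, as built by
 * kimage_add_entry() below (addresses are made up; the IND_* flags
 * live in the low bits of otherwise page-aligned physical addresses):
 *
 *      0x00100000 | IND_DESTINATION    copy cursor starts at 0x00100000
 *      0x05a00000 | IND_SOURCE         page copied to 0x00100000
 *      0x05a01000 | IND_SOURCE         page copied to 0x00101000
 *      0x07f00000 | IND_INDIRECTION    entries continue in page 0x07f00000
 *      ...
 *      IND_DONE                        end of list
 */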

/*
 * KIMAGE_NO_DEST is an impossible destination address, for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;
        unsigned long i;
        int result;

        /* Allocate a controlling structure */
        result = -ENOMEM;
        image = kmalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;

        memset(image, 0, sizeof(*image));
        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unuseable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        if (result) {
                result = -EFAULT;
                goto out;
        }

        /*
         * Verify we have good destination addresses. The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM. This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned. Too many
         * special cases crop up when we don't do this. The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        goto out;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        goto out;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through, very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend = pstart + image->segment[j].memsz;
                        /* Do the segments overlap? */
                        if ((mend > pstart) && (mstart < pend))
                                goto out;
                }
        }

        /* Ensure our buffer sizes do not exceed
         * our memory sizes. This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        image = NULL;
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /*
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                        get_order(KEXEC_CONTROL_CODE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;
        unsigned long i;

        image = NULL;
        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
                goto out;
        }

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /* Enable the special crash kernel control page
         * allocation policy.
         */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;

        /*
         * Verify we have good destination addresses. Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM. But crash kernels are preloaded into a
         * reserved area of RAM. We must ensure the addresses
         * are in the reserved area, otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
                        goto out;
        }

        /*
         * Find a location for the control code buffer, and add
         * it to the vector of segments so that its pages will also
         * be counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                        get_order(KEXEC_CONTROL_CODE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}
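
/*
 * Return 1 if any part of the half-open range [start, end) falls
 * inside a destination range claimed by one of the image's segments,
 * 0 otherwise.
 */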
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}
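
/*
 * Allocate 2^order contiguous pages for kexec's own use: clear the
 * mapping, stash the order in page_private so kimage_free_pages()
 * can undo the allocation, and mark every page reserved so the rest
 * of the kernel leaves them alone.
 */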
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place. As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address. Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}
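
/*
 * A worked example of the hole search below (values illustrative,
 * assuming 4K pages): for order 1, size = 8K; a control_page cursor
 * of 0x1000 rounds up to hole_start 0x2000 and hole_end 0x3fff, and
 * the window slides past any segment it overlaps, re-aligned to the
 * 8K allocation size each time.
 */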
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place. As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel. All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
                        break;
                if (hole_end > crashk_res.end)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
        if (pages)
                image->control_page = hole_end;

        return pages;
}
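
/*
 * Dispatch on the image type: normal images may place control pages
 * anywhere below KEXEC_CONTROL_MEMORY_LIMIT, while crash images must
 * carve them out of the reserved crashk_res window.
 */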
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}
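
/*
 * Append one entry to the kimage entry list. When the current page
 * of entries is full, its final slot is used to chain in a freshly
 * allocated indirection page with IND_INDIRECTION, and writing
 * continues there. The list is always left terminated by a zero
 * entry.
 */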
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE / sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}

static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unuseable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}

static int kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;

        return 0;
}
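
/*
 * Walk every non-terminator entry in the image's entry list, hopping
 * to the next indirection page whenever an IND_INDIRECTION entry is
 * met, and stopping at IND_DONE (or a zero entry).
 */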
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
             ptr = (entry & IND_INDIRECTION) ? \
                   phys_to_virt(entry & PAGE_MASK) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
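
/*
 * Tear down a loaded (or partially loaded) image: free every source
 * page and indirection page named in the entry list (an indirection
 * page is freed only after the walk has moved past it), then the
 * cached extra pages, the control pages, and the kimage itself.
 */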
static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
        kfree(image);
}
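
/*
 * Scan the entry list, tracking the implicit destination cursor
 * (IND_DESTINATION sets it, each IND_SOURCE advances it one page),
 * and return a pointer to the entry whose source page is bound for
 * @page, or NULL if no source page has claimed that destination yet.
 */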
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}
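
/*
 * Allocate a source page for the data destined for @destination.
 * Three outcomes are possible: a previously cached page for exactly
 * that destination is reused, a fresh page that is nobody's
 * destination is returned, or a fresh page that collides with an
 * already-assigned destination is swapped with the source page
 * currently bound for it.
 */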
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time. If the runtime is a problem the data structures can
         * be fixed.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                    (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page. And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it.
                         */
                        addr = old_addr;
                        page = old_page;
                        break;
                } else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}
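
/*
 * Copy segment->bufsz bytes from the user buffer into freshly
 * allocated source pages bound for segment->mem, zeroing each page
 * first so the memsz tail beyond bufsz ends up zero-filled.
 */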
static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                         << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                memset(ptr, 0, PAGE_SIZE);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes)
                        uchunk = ubytes;

                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr += mchunk;
                buf += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes) {
                        uchunk = ubytes;
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr += mchunk;
                buf += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}

static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image = NULL;
static struct kimage *kexec_crash_image = NULL;
/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock = 0;
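
/*
 * A hypothetical sketch of how user space drives this (names and
 * addresses are illustrative only; real loaders such as kexec-tools
 * do far more validation and relocation):
 *
 *      struct kexec_segment seg = {
 *              .buf   = image_buf,          // bytes in our address space
 *              .bufsz = image_len,
 *              .mem   = (void *)0x100000,   // page-aligned destination
 *              .memsz = (image_len + 4095) & ~4095UL,
 *      };
 *      syscall(__NR_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT);
 *
 * Passing KEXEC_ON_CRASH in flags instead targets the crash image
 * slot and requires the segments to sit inside crashk_res.
 */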
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                               struct kexec_segment __user *segments,
                               unsigned long flags)
{
        struct kimage **dest_image, *image;
        int locked;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /*
         * Verify we have a legal set of flags.
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
            ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        locked = xchg(&kexec_lock, 1);
        if (locked)
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                }
                if (result)
                        goto out;

                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image,
                                                     &image->segment[i]);
                        if (result)
                                goto out;
                }
                result = kimage_terminate(image);
                if (result)
                        goto out;
        }
        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);

out:
        xchg(&kexec_lock, 0); /* Release the mutex */
        kimage_free(image);

        return result;
}

#ifdef CONFIG_COMPAT
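/*
 * Compat entry point: convert 32-bit kexec_segment descriptors to
 * the native layout in a scratch user-space buffer, then chain to
 * sys_kexec_load().
 */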
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                      unsigned long nr_segments,
                                      struct compat_kexec_segment __user *segments,
                                      unsigned long flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
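
/*
 * Called from the panic path. Takes the binary mutex without waiting
 * (waiting is impossible in this context); if another cpu holds it we
 * simply return, otherwise we shut the machine down and jump into the
 * preloaded crash image, if any.
 */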
void crash_kexec(struct pt_regs *regs)
{
        struct kimage *image;
        int locked;

        /* Take the kexec_lock here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient. But since I reuse the memory...
         */
        locked = xchg(&kexec_lock, 1);
        if (!locked) {
                image = xchg(&kexec_crash_image, NULL);
                if (image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(image);
                }
                xchg(&kexec_lock, 0);
        }
}

static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                printk(KERN_ERR "Kexec: Memory allocation for saving cpu "
                                "register states failed\n");
                return -ENOMEM;
        }
        return 0;
}
module_init(crash_notes_memory_init)