/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Heavily borrowed from fs/proc/kcore.c
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
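
/*
 * Illustrative sketch (not part of this file): a hypervisor guest backend can
 * use the hook above to tell /proc/vmcore readers which pfns hold no data
 * (historically the Xen guest code registered such a hook). The callback and
 * the is_ballooned_pfn() helper below are hypothetical; only
 * register/unregister_oldmem_pfn_is_ram() are real interfaces of this file.
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return is_ballooned_pfn(pfn) ? 0 : 1;
 *	}
 *
 *	// when the backend comes up:
 *	//	register_oldmem_pfn_is_ram(example_oldmem_pfn_is_ram);
 *	// when it goes away:
 *	//	unregister_oldmem_pfn_is_ram();
 */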

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};
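
/*
 * Illustrative sketch (not part of this file): user space exercises the
 * read/llseek/mmap operations above by opening /proc/vmcore after a kdump
 * boot; tools such as makedumpfile and cp do exactly this. The snippet below
 * is a minimal, hypothetical reader shown only to make the interface concrete.
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		;	// hand buf to a dump-processing tool
 *	close(fd);
 */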

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	if (buf)
		vfree(buf);

	if (dump)
		vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
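
/*
 * Illustrative sketch (not part of this file): a driver that wants its own
 * state preserved in /proc/vmcore fills in a struct vmcoredd_data and calls
 * vmcore_add_device_dump() from its crash/recovery path. The callback, name
 * and size below are hypothetical; the in-tree user at the time of writing
 * is the cxgb4 driver.
 *
 *	static int example_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// copy up to data->size bytes of device state into buf
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data example_dd = {
 *		.dump_name = "example_dev",
 *		.size = SZ_1M,
 *		.vmcoredd_callback = example_collect,
 *	};
 *
 *	// e.g. after detecting a firmware crash:
 *	//	vmcore_add_device_dump(&example_dd);
 */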

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}