// SPDX-License-Identifier: GPL-2.0-only
/*
 *      fs/proc/vmcore.c Interface for accessing the crash
 *                       dump from the system's previous life.
 *      Heavily borrowed from fs/proc/kcore.c
 *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *      Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);
/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;
void register_vmcore_cb(struct vmcore_cb *cb)
{
        INIT_LIST_HEAD(&cb->next);
        spin_lock(&vmcore_cb_lock);
        list_add_tail(&cb->next, &vmcore_cb_list);
        /*
         * Registering a vmcore callback after the vmcore was opened is
         * very unusual (e.g., manual driver loading).
         */
        if (vmcore_opened)
                pr_warn_once("Unexpected vmcore callback registration\n");
        spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);
void unregister_vmcore_cb(struct vmcore_cb *cb)
{
        spin_lock(&vmcore_cb_lock);
        list_del_rcu(&cb->next);
        /*
         * Unregistering a vmcore callback after the vmcore was opened is
         * very unusual (e.g., forced driver removal), but we cannot stop
         * unregistering.
         */
        if (vmcore_opened)
                pr_warn_once("Unexpected vmcore callback unregistration\n");
        spin_unlock(&vmcore_cb_lock);

        synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
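
/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * driver (e.g., virtio-mem) hooks into the callbacks above. The driver
 * registers early in the second (kdump) kernel, before /proc/vmcore is
 * opened. my_pfn_is_ram()/my_vmcore_cb/my_driver_pfn_backed() are
 * hypothetical names; the layout matches struct vmcore_cb in
 * <linux/crash_dump.h>:
 *
 *      static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *      {
 *              return my_driver_pfn_backed(pfn);
 *      }
 *
 *      static struct vmcore_cb my_vmcore_cb = {
 *              .pfn_is_ram = my_pfn_is_ram,
 *      };
 *
 *      register_vmcore_cb(&my_vmcore_cb);
 */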
static bool pfn_is_ram(unsigned long pfn)
{
        struct vmcore_cb *cb;
        bool ret = true;

        list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
                                 srcu_read_lock_held(&vmcore_cb_srcu)) {
                if (unlikely(!cb->pfn_is_ram))
                        continue;
                ret = cb->pfn_is_ram(cb, pfn);
                if (!ret)
                        break;
        }

        return ret;
}
static int open_vmcore(struct inode *inode, struct file *file)
{
        spin_lock(&vmcore_cb_lock);
        vmcore_opened = true;
        spin_unlock(&vmcore_cb_lock);

        return 0;
}
/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(char *buf, size_t count,
                         u64 *ppos, int userbuf,
                         bool encrypted)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;
        int idx;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        idx = srcu_read_lock(&vmcore_cb_srcu);
        while (count) {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (!pfn_is_ram(pfn)) {
                        tmp = 0;
                        if (!userbuf)
                                memset(buf, 0, nr_bytes);
                        else if (clear_user(buf, nr_bytes))
                                tmp = -EFAULT;
                } else {
                        if (encrypted)
                                tmp = copy_oldmem_page_encrypted(pfn, buf,
                                                                 nr_bytes,
                                                                 offset,
                                                                 userbuf);
                        else
                                tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                       offset, userbuf);
                }
                if (tmp < 0) {
                        srcu_read_unlock(&vmcore_cb_srcu, idx);
                        return tmp;
                }

                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        }
        srcu_read_unlock(&vmcore_cb_srcu, idx);

        return read;
}
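
/*
 * Editor's note, a worked example for read_from_oldmem() above (assuming
 * 4 KiB pages): a read of count = 0x2000 bytes at *ppos = 0x1234 starts at
 * pfn 1, offset 0x234. The first loop iteration copies
 * PAGE_SIZE - 0x234 = 0xdcc bytes, then continues with offset 0 from pfn 2
 * onwards until count is exhausted, so a single read may span several pages
 * and mix RAM-backed pages with zero-filled non-RAM pages.
 */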
/*
 * Architectures may override this function to allocate the ELF header in the
 * 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free the header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        prot = pgprot_encrypted(prot);
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
                           unsigned long offset, int userbuf)
{
        return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}
/*
 * Copy to either kernel or user space.
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (copy_to(dst, buf, tsz, userbuf)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
                               u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (remap_vmalloc_range_partial(vma, dst, buf, 0,
                                                        tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)*fpos, buflen);
                        start = *fpos - elfcorebuf_sz;
                        if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
                                return -EFAULT;

                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!buflen)
                                return acc;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;

                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start,
                                               userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return vmf_error(rc);
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
        return vmalloc_user(size);
#else
        return vzalloc(size);
#endif
}
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */

#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len, NULL);
        return -EAGAIN;
}
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
{
        int ret, idx;

        /*
         * Check if a callback was registered to avoid looping over all
         * pages without a reason.
         */
        idx = srcu_read_lock(&vmcore_cb_srcu);
        if (!list_empty(&vmcore_cb_list))
                ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
        srcu_read_unlock(&vmcore_cb_srcu, idx);
        return ret;
}
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided. This also ensures that the device dumps and
                 * other elf notes can be properly mmapped at page aligned
                 * addresses.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
                        u64 start_off;

                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)start, size);
                        start_off = start - elfcorebuf_sz;
                        if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
                                                start_off, tsz))
                                goto fail;

                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        /* leave now if filled buffer already */
                        if (!size)
                                return 0;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, 0, tsz))
                        goto fail;

                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif
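
/*
 * Illustrative only: from user space, the whole dump can be mapped
 * read-only in one go, which dump-capture tools such as makedumpfile can
 * use instead of repeated read() calls:
 *
 *      int fd = open("/proc/vmcore", O_RDONLY);
 *      void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * where st comes from fstat(fd, &st). Writable or executable mappings are
 * rejected with -EPERM by mmap_vmcore() above.
 */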
static const struct proc_ops vmcore_proc_ops = {
        .proc_open      = open_vmcore,
        .proc_read      = read_vmcore,
        .proc_lseek     = default_llseek,
        .proc_mmap      = mmap_vmcore,
};
static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                           struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}
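
/*
 * Editor's note on the file layout that the functions below construct,
 * with every boundary page aligned:
 *
 *      +--------------------------------------------------+ offset 0
 *      | ELF header + merged program headers (elfcorebuf) |
 *      +--------------------------------------------------+ elfcorebuf_sz
 *      | device dump notes (if any) + merged ELF notes    |
 *      +--------------------------------------------------+ + elfnotes_sz
 *      | memory chunks described by PT_LOAD (vmcore_list) |
 *      +--------------------------------------------------+ vmcore_size
 */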
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
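
/*
 * Editor's note: the size computation above walks the in-memory note buffer
 * entry by entry. Name and descriptor are each padded to a 4-byte boundary,
 * so e.g. an entry with n_namesz = 5 ("CORE" plus NUL) and n_descsz = 336
 * contributes sizeof(Elf64_Nhdr) + 8 + 336 = 356 bytes to real_sz.
 */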
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes. We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
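
/*
 * The Elf32 variants below mirror the Elf64 functions above one to one;
 * only the ELF structure types differ.
 */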
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes. We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}
static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf32_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
                                  u32 size)
{
        struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

        vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
        vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
        vdd_hdr->n_type = NT_VMCOREDD;

        strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
                sizeof(vdd_hdr->name));
        memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
                                            size_t vmcoreddsz)
{
        unsigned char *e_ident = (unsigned char *)elfptr;
        u64 start, end, size;
        loff_t vmcore_off;
        u32 i;

        vmcore_off = elfcorebuf_sz + elfnotesz;

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
                Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off = vmcore_off + size;
                }
        } else {
                Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
                Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off = vmcore_off + size;
                }
        }
}
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf headers
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
        vmcoredd_orig_sz += dump_size;
        elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
        vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
                                        vmcoredd_orig_sz);

        /* Update vmcore list offsets */
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);
        proc_vmcore->size = vmcore_size;
}
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
        struct vmcoredd_node *dump;
        void *buf = NULL;
        size_t data_size;
        int ret;

        if (vmcoredd_disabled) {
                pr_err_once("Device dump is disabled\n");
                return -EINVAL;
        }

        if (!data || !strlen(data->dump_name) ||
            !data->vmcoredd_callback || !data->size)
                return -EINVAL;

        dump = vzalloc(sizeof(*dump));
        if (!dump) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* Keep size of the buffer page aligned so that it can be mmapped */
        data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
                            PAGE_SIZE);

        /* Allocate buffer for drivers to write their dumps */
        buf = vmcore_alloc_buf(data_size);
        if (!buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        vmcoredd_write_header(buf, data, data_size -
                              sizeof(struct vmcoredd_header));

        /* Invoke the driver's dump collection routine */
        ret = data->vmcoredd_callback(data, buf +
                                      sizeof(struct vmcoredd_header));
        if (ret)
                goto out_err;

        dump->buf = buf;
        dump->size = data_size;

        /* Add the dump to driver sysfs list */
        mutex_lock(&vmcoredd_mutex);
        list_add_tail(&dump->list, &vmcoredd_list);
        mutex_unlock(&vmcoredd_mutex);

        vmcoredd_update_size(data_size);
        return 0;

out_err:
        vfree(buf);
        vfree(dump);

        return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
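
/*
 * Illustrative only, not part of this file: a minimal sketch of a driver
 * adding its device dump (field names per struct vmcoredd_data in
 * <linux/crash_dump.h>; my_collect(), my_hw_snapshot() and MY_DUMP_LEN are
 * hypothetical):
 *
 *      static int my_collect(struct vmcoredd_data *data, void *buf)
 *      {
 *              return my_hw_snapshot(buf, data->size);
 *      }
 *
 *      static struct vmcoredd_data my_dd = {
 *              .dump_name = "MY_DEV",
 *              .size = MY_DUMP_LEN,
 *              .vmcoredd_callback = my_collect,
 *      };
 *
 *      vmcore_add_device_dump(&my_dd);
 */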
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
        mutex_lock(&vmcoredd_mutex);
        while (!list_empty(&vmcoredd_list)) {
                struct vmcoredd_node *dump;

                dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
                                        list);
                list_del(&dump->list);
                vfree(dump->buf);
                vfree(dump);
        }
        mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        while (!list_empty(&vmcore_list)) {
                struct vmcore *m;

                m = list_first_entry(&vmcore_list, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();

        /* clear vmcore device dump list */
        vmcore_free_device_dumps();
}