// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

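/*
 * Example (illustrative sketch, not compiled as part of this file): a driver
 * managing memory that may be absent from the old kernel's physical address
 * space, such as virtio-mem, can register a callback so that such pfns are
 * exported as zeroes instead of being read.  The my_dev_* names below are
 * hypothetical; only struct vmcore_cb and register_vmcore_cb() are real.
 */
#if 0
static bool my_dev_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* Report "not RAM" for pfns this device knows hold no dump data. */
	return !my_dev_pfn_is_unplugged(pfn);	/* hypothetical helper */
}

static struct vmcore_cb my_dev_vmcore_cb = {
	.pfn_is_ram = my_dev_pfn_is_ram,
};

static void my_dev_kdump_init(void)
{
	/* Typically called from a probe routine running in the kdump kernel. */
	register_vmcore_cb(&my_dev_vmcore_cb);
}
#endif
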
static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
			 u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	ssize_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += tmp;
		count -= tmp;
		read += tmp;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}

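/*
 * Worked example of the loop above (illustrative, PAGE_SIZE == 4096):
 * a read of count = 5000 at *ppos = 0x1800 starts at pfn 1 with
 * offset = 0x800.  The first iteration copies PAGE_SIZE - 0x800 = 2048
 * bytes; the second copies the remaining 2952 bytes from pfn 2 at
 * offset 0, so the caller sees read = 5000.
 */
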
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{
}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos,
			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
		unsigned long pfn, size_t csize, unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;

	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */

#ifdef CONFIG_MMU

/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

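/*
 * Worked example (illustrative): mapping 8 pages starting at pfn 0x100
 * where only pfn 0x103 is reported as not ram results in three calls to
 * remap_oldmem_pfn_range(): pfns 0x100-0x102, the zero page in place of
 * 0x103, and pfns 0x104-0x107 from the final "Remap the rest" step.
 */
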
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmaped at page aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		/* leave now if filled buffer already */
		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}

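/*
 * Userspace view (illustrative sketch): dump tools such as makedumpfile
 * map /proc/vmcore read-only, chunk by chunk, e.g.:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, off);
 *
 * PROT_WRITE or PROT_EXEC mappings fail with -EPERM, and off + len must
 * not exceed vmcore_size, matching the checks at the top of mmap_vmcore().
 */
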
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

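/*
 * Example of the note sizing above (illustrative): an entry with
 * n_namesz = 5 ("CORE") and n_descsz = 336 contributes
 * sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4) = 12 + 8 + 336
 * = 356 bytes to real_sz, since name and descriptor are each padded
 * to 4-byte boundaries.
 */
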
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

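/*
 * Resulting /proc/vmcore layout after merging (illustrative):
 *
 *	+------------------------------+ offset 0
 *	| Elf64_Ehdr                   |
 *	| merged PT_NOTE phdr          |
 *	| PT_LOAD phdrs ...            | elfcorebuf, padded to PAGE_SIZE
 *	+------------------------------+ roundup(note_off, PAGE_SIZE)
 *	| merged note segment          | elfnotes_buf
 *	+------------------------------+ elfcorebuf_sz + elfnotes_sz
 *	| old kernel memory chunks     | vmcore_list, read via oldmem
 *	| from the PT_LOAD entries ... |
 *	+------------------------------+
 */
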
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
	strscpy_pad(vdd_hdr->dump_name, data->dump_name);
}

/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of ELF header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off = vmcore_off + size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off = vmcore_off + size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * ELF header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write ELF note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);

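/*
 * Example (illustrative sketch, not compiled as part of this file): how a
 * driver adds a device dump, loosely following the cxgb4 usage of this
 * interface.  my_fw_collect(), my_fw_read_state() and MY_FW_DUMP_SIZE are
 * hypothetical; struct vmcoredd_data and vmcore_add_device_dump() are real.
 */
#if 0
static int my_fw_collect(struct vmcoredd_data *data, void *buf)
{
	/* Fill buf with up to data->size bytes of device state. */
	return my_fw_read_state(buf, data->size);
}

static int my_fw_add_vmcore_dump(void)
{
	struct vmcoredd_data data = {
		.size = MY_FW_DUMP_SIZE,
		.vmcoredd_callback = my_fw_collect,
	};

	strscpy(data.dump_name, "my_fw", sizeof(data.dump_name));
	return vmcore_add_device_dump(&data);
}
#endif
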
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		elfcorehdr_free(elfcorehdr_addr);
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}