// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas,
 *	Kanoj Sarcar <[email protected]>
 */

#include <linux/vmcore_info.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

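/*
 * All kcore_list entries backing the /proc/kcore segments live on
 * kclist_head, protected by kclist_lock. kcore_need_update flags that the
 * RAM entries may be stale (e.g. after a memory hotplug event).
 */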
static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * Same as oldmem_pfn_is_ram in vmcore.
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

/* Fall back to treating every page as RAM when no hook is registered. */
static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

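/*
 * Compute the total size of /proc/kcore: the page-aligned headers (ELF
 * header, program headers, note segment) plus the highest file offset
 * covered by any registered segment. The individual header sizes are
 * returned through the out parameters.
 */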
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size = 0;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */

	list_for_each_entry(m, &kclist_head, list) {
		/* One PT_LOAD header per kclist entry. */
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * If no highmem, we can assume [0...max_low_pfn) is a continuous range of
 * memory, because the memory holes are not as big as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (because we have to align the pages) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif

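/*
 * walk_system_ram_range() callback: register one KCORE_RAM entry per System
 * RAM range, trimmed to the portion that is actually direct-mapped, plus
 * the vmemmap region describing it where applicable.
 */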
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid((void *)ent->addr))
		goto free_out;

	/* cut the not-mapped area (....from ppc-32 code) */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid so we know this address
	 * is a valid pointer, therefore we can check against it to determine
	 * if we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

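/*
 * Rebuild the KCORE_RAM and KCORE_VMEMMAP entries: build a fresh list, swap
 * it in under the write lock, and free the replaced entries only after the
 * lock has been dropped.
 */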
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

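/*
 * Emit one ELF note at offset *i of the notes buffer: the elf_note header,
 * then the NUL-terminated name and the descriptor, each padded to 4 bytes.
 */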
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

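/*
 * A read of /proc/kcore walks, in file-offset order:
 *
 *   [ELF header][program headers][note segment][page-aligned gap]
 *   [memory contents starting at data_offset]
 *
 * and serves whichever of those regions the current position falls into.
 */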
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	char *buf = file->private_data;
	loff_t *fpos = &iocb->ki_pos;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t buflen = iov_iter_count(iter);
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
			ret = -EFAULT;
			goto out;
		}

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
				 iter) != tsz) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strscpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *iter;

			m = NULL;
			list_for_each_entry(iter, &kclist_head, list) {
				if (start >= iter->addr &&
				    start < iter->addr + iter->size) {
					m = iter;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
		{
			const char *src = (char *)start;
			size_t read = 0, left = tsz;

			/*
			 * vmalloc uses spinlocks, so we optimistically try to
			 * read memory. If this fails, fault pages in and try
			 * again until we are done.
			 */
			while (true) {
				read += vread_iter(iter, src, left);
				if (read == tsz)
					break;

				src += read;
				left -= read;

				if (fault_in_iov_iter_writeable(iter, left)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		}
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			pfn = __pa(start) >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn) ||
			    pfn_is_unaccepted_memory(pfn)) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			/*
			 * Sadly we must use a bounce buffer here to be able to
			 * make use of copy_from_kernel_nofault(), as these
			 * memory regions might not always be mapped on all
			 * architectures.
			 */
			if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
			/*
			 * We know the bounce buffer is safe to copy from, so
			 * use _copy_to_iter() directly.
			 */
			} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

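/*
 * Opening /proc/kcore requires CAP_SYS_RAWIO and respects kernel lockdown;
 * each open file gets its own one-page bounce buffer, used by the read
 * path for copy_from_kernel_nofault().
 */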
static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read_iter	= read_kcore_iter,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

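/*
 * The resulting file is a regular ELF core image, so it can be consumed
 * with e.g. "gdb vmlinux /proc/kcore" to inspect live kernel memory.
 */
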
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a special TEXT section for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

	return 0;
}
fs_initcall(proc_kcore_init);