1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2008, 2009 Intel Corporation
4 * Authors: Andi Kleen, Fengguang Wu
6 * High level machine check handler. Handles pages reported by the
7 * hardware as being corrupted usually due to a multi-bit ECC memory or cache failure.
10 * In addition there is a "soft offline" entry point that allows the kernel to
11 * stop using not-yet-corrupted but suspicious pages without killing anything.
13 * Handles page cache pages in various states. The tricky part
14 * here is that we can access any page asynchronously with respect to
15 * other VM users, because memory failures could happen anytime and
16 * anywhere. This could violate some of their assumptions. This is why
17 * this code has to be extremely careful. Generally it tries to use
18 * normal locking rules, as in get the standard locks, even if that means
19 * the error handling takes potentially a long time.
21 * It can be very tempting to add handling for obscure cases here.
22 * In general any code for handling new cases should only be added if all of the following hold:
23 * - You know how to test it.
24 * - You have a test that can be added to mce-test
25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/mm/page-types when running a real workload.
29 * There are several operations here with exponential complexity because
30 * of unsuitable VM data structures. For example the operation to map back
31 * from RMAP chains to processes has to walk the complete process list and
32 * has non-linear complexity in the number of processes. But since memory corruptions
33 * are rare we hope to get away with this. This avoids impacting the core VM.
37 #define pr_fmt(fmt) "Memory failure: " fmt
39 #include <linux/kernel.h>
41 #include <linux/page-flags.h>
42 #include <linux/sched/signal.h>
43 #include <linux/sched/task.h>
44 #include <linux/dax.h>
45 #include <linux/ksm.h>
46 #include <linux/rmap.h>
47 #include <linux/export.h>
48 #include <linux/pagemap.h>
49 #include <linux/swap.h>
50 #include <linux/backing-dev.h>
51 #include <linux/migrate.h>
52 #include <linux/slab.h>
53 #include <linux/swapops.h>
54 #include <linux/hugetlb.h>
55 #include <linux/memory_hotplug.h>
56 #include <linux/mm_inline.h>
57 #include <linux/memremap.h>
58 #include <linux/kfifo.h>
59 #include <linux/ratelimit.h>
60 #include <linux/pagewalk.h>
61 #include <linux/shmem_fs.h>
62 #include <linux/sysctl.h>
65 #include "ras/ras_event.h"
67 static int sysctl_memory_failure_early_kill __read_mostly;
69 static int sysctl_memory_failure_recovery __read_mostly = 1;
71 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
73 static bool hw_memory_failure __read_mostly = false;
75 static DEFINE_MUTEX(mf_mutex);
77 void num_poisoned_pages_inc(unsigned long pfn)
79 atomic_long_inc(&num_poisoned_pages);
80 memblk_nr_poison_inc(pfn);
83 void num_poisoned_pages_sub(unsigned long pfn, long i)
85 atomic_long_sub(i, &num_poisoned_pages);
87 memblk_nr_poison_sub(pfn, i);
91 * MF_ATTR_RO - Create a sysfs entry for each memory failure statistic.
92 * @_name: name of the file in the per-NUMA-node sysfs directory.
94 #define MF_ATTR_RO(_name) \
95 static ssize_t _name##_show(struct device *dev, \
96 struct device_attribute *attr, \
99 struct memory_failure_stats *mf_stats = \
100 &NODE_DATA(dev->id)->mf_stats; \
101 return sprintf(buf, "%lu\n", mf_stats->_name); \
103 static DEVICE_ATTR_RO(_name)
109 MF_ATTR_RO(recovered);
111 static struct attribute *memory_failure_attr[] = {
112 &dev_attr_total.attr,
113 &dev_attr_ignored.attr,
114 &dev_attr_failed.attr,
115 &dev_attr_delayed.attr,
116 &dev_attr_recovered.attr,
120 const struct attribute_group memory_failure_attr_group = {
121 .name = "memory_failure",
122 .attrs = memory_failure_attr,
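/*
 * These counters are created per NUMA node; with the group name above they
 * typically show up as read-only files like
 * /sys/devices/system/node/node<N>/memory_failure/total (exact path depends
 * on how the attribute group is registered in the running kernel). A minimal
 * illustrative userspace sketch for dumping them; dump_node_mf_stats() is a
 * hypothetical helper, not part of this file:
 *
 *	#include <stdio.h>
 *
 *	static void dump_node_mf_stats(int nid)
 *	{
 *		static const char * const names[] = {
 *			"total", "ignored", "failed", "delayed", "recovered",
 *		};
 *		char path[128];
 *		unsigned long val;
 *		size_t i;
 *
 *		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
 *			FILE *f;
 *
 *			snprintf(path, sizeof(path),
 *				 "/sys/devices/system/node/node%d/memory_failure/%s",
 *				 nid, names[i]);
 *			f = fopen(path, "r");
 *			if (!f)
 *				continue;
 *			if (fscanf(f, "%lu", &val) == 1)
 *				printf("node%d %s: %lu\n", nid, names[i], val);
 *			fclose(f);
 *		}
 *	}
 */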
125 static struct ctl_table memory_failure_table[] = {
127 .procname = "memory_failure_early_kill",
128 .data = &sysctl_memory_failure_early_kill,
129 .maxlen = sizeof(sysctl_memory_failure_early_kill),
131 .proc_handler = proc_dointvec_minmax,
132 .extra1 = SYSCTL_ZERO,
133 .extra2 = SYSCTL_ONE,
136 .procname = "memory_failure_recovery",
137 .data = &sysctl_memory_failure_recovery,
138 .maxlen = sizeof(sysctl_memory_failure_recovery),
140 .proc_handler = proc_dointvec_minmax,
141 .extra1 = SYSCTL_ZERO,
142 .extra2 = SYSCTL_ONE,
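/*
 * These two entries are the vm.memory_failure_early_kill and
 * vm.memory_failure_recovery sysctls, normally reachable as
 * /proc/sys/vm/memory_failure_early_kill and
 * /proc/sys/vm/memory_failure_recovery. A minimal illustrative userspace
 * sketch for enabling early kill; enable_early_kill() is a hypothetical
 * helper, not part of this file:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int enable_early_kill(void)
 *	{
 *		int fd = open("/proc/sys/vm/memory_failure_early_kill", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "1", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */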
149 * 1: the page is dissolved (if needed) and taken off from buddy,
150 * 0: the page is dissolved (if needed) and not taken off from buddy,
151 * < 0: failed to dissolve.
153 static int __page_handle_poison(struct page *page)
157 zone_pcp_disable(page_zone(page));
158 ret = dissolve_free_huge_page(page);
160 ret = take_page_off_buddy(page);
161 zone_pcp_enable(page_zone(page));
166 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
168 if (hugepage_or_freepage) {
170 * Doing this check for free pages is also fine since dissolve_free_huge_page
171 * returns 0 for non-hugetlb pages as well.
173 if (__page_handle_poison(page) <= 0)
175 * We could fail to take the target page off the buddy list,
176 * for example due to a racy page allocation, but that's
177 * acceptable because a soft-offlined page is not broken,
178 * so whoever ends up allocating it can still use it safely.
184 SetPageHWPoison(page);
188 num_poisoned_pages_inc(page_to_pfn(page));
193 #if IS_ENABLED(CONFIG_HWPOISON_INJECT)
195 u32 hwpoison_filter_enable = 0;
196 u32 hwpoison_filter_dev_major = ~0U;
197 u32 hwpoison_filter_dev_minor = ~0U;
198 u64 hwpoison_filter_flags_mask;
199 u64 hwpoison_filter_flags_value;
200 EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
201 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
202 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
203 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
204 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
206 static int hwpoison_filter_dev(struct page *p)
208 struct address_space *mapping;
211 if (hwpoison_filter_dev_major == ~0U &&
212 hwpoison_filter_dev_minor == ~0U)
215 mapping = page_mapping(p);
216 if (mapping == NULL || mapping->host == NULL)
219 dev = mapping->host->i_sb->s_dev;
220 if (hwpoison_filter_dev_major != ~0U &&
221 hwpoison_filter_dev_major != MAJOR(dev))
223 if (hwpoison_filter_dev_minor != ~0U &&
224 hwpoison_filter_dev_minor != MINOR(dev))
230 static int hwpoison_filter_flags(struct page *p)
232 if (!hwpoison_filter_flags_mask)
235 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
236 hwpoison_filter_flags_value)
243 * This allows stress tests to limit test scope to a collection of tasks
244 * by putting them under some memcg. This prevents killing unrelated/important
245 * processes such as /sbin/init. Note that the target task may share clean
246 * pages with init (e.g. libc text), which is harmless. If the target task
247 * shares _dirty_ pages with another task B, the test scheme must make sure B
248 * is also included in the memcg. Finally, due to race conditions this filter
249 * can only guarantee that the page either belongs to the memcg tasks, or is a freed page.
253 u64 hwpoison_filter_memcg;
254 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
255 static int hwpoison_filter_task(struct page *p)
257 if (!hwpoison_filter_memcg)
260 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
266 static int hwpoison_filter_task(struct page *p) { return 0; }
269 int hwpoison_filter(struct page *p)
271 if (!hwpoison_filter_enable)
274 if (hwpoison_filter_dev(p))
277 if (hwpoison_filter_flags(p))
280 if (hwpoison_filter_task(p))
286 int hwpoison_filter(struct page *p)
292 EXPORT_SYMBOL_GPL(hwpoison_filter);
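/*
 * With CONFIG_HWPOISON_INJECT the filter knobs above are driven from debugfs
 * by the hwpoison injector, conventionally under /sys/kernel/debug/hwpoison/
 * (file names such as corrupt-pfn and hwpoison-filter-enable are assumptions
 * here; check the running kernel). A minimal illustrative sketch that enables
 * the filter and injects one pfn; write_hwpoison_debugfs() is a hypothetical
 * helper, not part of this file:
 *
 *	#include <stdio.h>
 *
 *	static int write_hwpoison_debugfs(const char *file, unsigned long long val)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/kernel/debug/hwpoison/%s", file);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fprintf(f, "%llu\n", val);
 *		return fclose(f);
 *	}
 *
 *	e.g.: write_hwpoison_debugfs("hwpoison-filter-enable", 1);
 *	      write_hwpoison_debugfs("corrupt-pfn", pfn);
 */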
295 * Kill all processes that have a poisoned page mapped and then isolate
299 * Find all processes having the page mapped and kill them.
300 * But we keep a page reference around so that the page is not
301 * actually freed yet.
302 * Then stash the page away
304 * There's no convenient way to get back to mapped processes
305 * from the VMAs. So do a brute-force search over all processes.
308 * Remember that machine checks are not common (or rather
309 * if they are common you have other problems), so this shouldn't
310 * be a performance issue.
312 * Also there are some races possible while we get from the
313 * error detection to actually handle it.
318 struct task_struct *tsk;
324 * Send all the processes that have the page mapped a signal.
325 * ``action optional'' if they are not immediately affected by the error
326 * ``action required'' if error happened in current execution context
328 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
330 struct task_struct *t = tk->tsk;
331 short addr_lsb = tk->size_shift;
334 pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
335 pfn, t->comm, t->pid);
337 if ((flags & MF_ACTION_REQUIRED) && (t == current))
338 ret = force_sig_mceerr(BUS_MCEERR_AR,
339 (void __user *)tk->addr, addr_lsb);
342 * Signal other processes sharing the page if they have
344 * Don't use force here, it's convenient if the signal
345 * can be temporarily blocked.
346 * This could cause a loop when the user sets SIGBUS
347 * to SIG_IGN, but hopefully no one will do that?
349 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
352 pr_info("Error sending signal to %s:%d: %d\n",
353 t->comm, t->pid, ret);
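/*
 * Userspace sees the two cases above as SIGBUS with si_code BUS_MCEERR_AR
 * ("action required", sent synchronously to the faulting thread) or
 * BUS_MCEERR_AO ("action optional"), with si_addr set to the poisoned
 * address and si_addr_lsb to the mapping granularity in bits. A thread opts
 * in to early AO delivery with prctl(PR_MCE_KILL, PR_MCE_KILL_SET,
 * PR_MCE_KILL_EARLY, 0, 0), which sets the PF_MCE_PROCESS/PF_MCE_EARLY flags
 * tested in find_early_kill_thread() below. Illustrative sketch of a handler
 * (fprintf is not async-signal-safe, fine only for a demo); setup_early_kill()
 * is a hypothetical helper, not part of this file:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO || si->si_code == BUS_MCEERR_AR)
 *			fprintf(stderr, "memory error at %p (lsb %d)\n",
 *				si->si_addr, si->si_addr_lsb);
 *	}
 *
 *	static int setup_early_kill(void)
 *	{
 *		struct sigaction sa = { 0 };
 *
 *		sa.sa_sigaction = sigbus_handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		if (sigaction(SIGBUS, &sa, NULL))
 *			return -1;
 *		return prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *	}
 */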
358 * Unknown page type encountered. Try to check whether it can turn PageLRU by draining the per-CPU LRU caches.
361 void shake_page(struct page *p)
366 * TODO: Could shrink slab caches here if a lightweight range-based
367 * shrinker will be available.
374 EXPORT_SYMBOL_GPL(shake_page);
376 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
377 unsigned long address)
379 unsigned long ret = 0;
387 VM_BUG_ON_VMA(address == -EFAULT, vma);
388 pgd = pgd_offset(vma->vm_mm, address);
389 if (!pgd_present(*pgd))
391 p4d = p4d_offset(pgd, address);
392 if (!p4d_present(*p4d))
394 pud = pud_offset(p4d, address);
395 if (!pud_present(*pud))
397 if (pud_devmap(*pud))
399 pmd = pmd_offset(pud, address);
400 if (!pmd_present(*pmd))
402 if (pmd_devmap(*pmd))
404 pte = pte_offset_map(pmd, address);
407 ptent = ptep_get(pte);
408 if (pte_present(ptent) && pte_devmap(ptent))
415 * Failure handling: if we can't find or can't kill a process there's
416 * not much we can do. We just print a message and otherwise ignore the error.
419 #define FSDAX_INVALID_PGOFF ULONG_MAX
422 * Schedule a process for later kill.
423 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
425 * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
426 * filesystem with a memory failure handler has claimed the
427 * memory_failure event. In all other cases, page->index and
428 * page->mapping are sufficient for mapping the page back to its
429 * corresponding user virtual address.
431 static void __add_to_kill(struct task_struct *tsk, struct page *p,
432 struct vm_area_struct *vma, struct list_head *to_kill,
433 unsigned long ksm_addr, pgoff_t fsdax_pgoff)
437 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
439 pr_err("Out of memory while machine check handling\n");
443 tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
444 if (is_zone_device_page(p)) {
445 if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
446 tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
447 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
449 tk->size_shift = page_shift(compound_head(p));
452 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
453 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
454 * "tk->size_shift == 0" effectively checks that there is no mapping on
455 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
456 * to a process' address space, it's possible not all N VMAs
457 * contain mappings for the page, but at least one VMA does.
458 * Only deliver SIGBUS with payload derived from the VMA that
459 * has a mapping for the page.
461 if (tk->addr == -EFAULT) {
462 pr_info("Unable to find user space address %lx in %s\n",
463 page_to_pfn(p), tsk->comm);
464 } else if (tk->size_shift == 0) {
469 get_task_struct(tsk);
471 list_add_tail(&tk->nd, to_kill);
474 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
475 struct vm_area_struct *vma,
476 struct list_head *to_kill)
478 __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
482 static bool task_in_to_kill_list(struct list_head *to_kill,
483 struct task_struct *tsk)
485 struct to_kill *tk, *next;
487 list_for_each_entry_safe(tk, next, to_kill, nd) {
494 void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
495 struct vm_area_struct *vma, struct list_head *to_kill,
496 unsigned long ksm_addr)
498 if (!task_in_to_kill_list(to_kill, tsk))
499 __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
503 * Kill the processes that have been collected earlier.
505 * Only do anything when FORCEKILL is set, otherwise just free the
506 * list (this is used for clean pages which do not need killing).
507 * Also when FAIL is set do a force kill because something went wrong earlier.
510 static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
511 unsigned long pfn, int flags)
513 struct to_kill *tk, *next;
515 list_for_each_entry_safe(tk, next, to_kill, nd) {
518 * In case something went wrong with munmapping
519 * make sure the process doesn't catch the
520 * signal and then access the memory. Just kill it.
522 if (fail || tk->addr == -EFAULT) {
523 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
524 pfn, tk->tsk->comm, tk->tsk->pid);
525 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
526 tk->tsk, PIDTYPE_PID);
530 * In theory the process could have mapped
531 * something else on the address in-between. We could
532 * check for that, but we need to tell the
535 else if (kill_proc(tk, pfn, flags) < 0)
536 pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
537 pfn, tk->tsk->comm, tk->tsk->pid);
540 put_task_struct(tk->tsk);
546 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
547 * on behalf of the thread group. Return task_struct of the (first found)
548 * dedicated thread if found, and return NULL otherwise.
550 * We already hold rcu lock in the caller, so we don't have to call
551 * rcu_read_lock/unlock() in this function.
553 static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
555 struct task_struct *t;
557 for_each_thread(tsk, t) {
558 if (t->flags & PF_MCE_PROCESS) {
559 if (t->flags & PF_MCE_EARLY)
562 if (sysctl_memory_failure_early_kill)
570 * Determine whether a given process is "early kill" process which expects
571 * to be signaled when some page under the process is hwpoisoned.
572 * Return task_struct of the dedicated thread (main thread unless explicitly
573 * specified) if the process is "early kill" and otherwise returns NULL.
575 * Note that the above applies to the Action Optional case. In the Action
576 * Required case the error is only meaningful to the current thread, which must
577 * be signaled with SIGBUS; for the other, non-current processes sharing the
578 * same error page the error is Action Optional, and if such a process is
579 * "early kill" the task_struct of its dedicated thread will also be returned.
581 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
586 * Comparing ->mm here because current task might represent
587 * a subthread, while tsk always points to the main thread.
589 if (force_early && tsk->mm == current->mm)
592 return find_early_kill_thread(tsk);
596 * Collect processes when the error hit an anonymous page.
598 static void collect_procs_anon(struct folio *folio, struct page *page,
599 struct list_head *to_kill, int force_early)
601 struct vm_area_struct *vma;
602 struct task_struct *tsk;
606 av = folio_lock_anon_vma_read(folio, NULL);
607 if (av == NULL) /* Not actually mapped anymore */
610 pgoff = page_to_pgoff(page);
612 for_each_process(tsk) {
613 struct anon_vma_chain *vmac;
614 struct task_struct *t = task_early_kill(tsk, force_early);
618 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
621 if (vma->vm_mm != t->mm)
623 if (!page_mapped_in_vma(page, vma))
625 add_to_kill_anon_file(t, page, vma, to_kill);
629 anon_vma_unlock_read(av);
633 * Collect processes when the error hit a file mapped page.
635 static void collect_procs_file(struct folio *folio, struct page *page,
636 struct list_head *to_kill, int force_early)
638 struct vm_area_struct *vma;
639 struct task_struct *tsk;
640 struct address_space *mapping = folio->mapping;
643 i_mmap_lock_read(mapping);
645 pgoff = page_to_pgoff(page);
646 for_each_process(tsk) {
647 struct task_struct *t = task_early_kill(tsk, force_early);
651 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
654 * Send early kill signal to tasks where a vma covers
655 * the page but the corrupted page is not necessarily mapped in its pte.
657 * Assume applications that requested early kill want
658 * to be informed of all such data corruptions.
660 if (vma->vm_mm == t->mm)
661 add_to_kill_anon_file(t, page, vma, to_kill);
665 i_mmap_unlock_read(mapping);
669 static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
670 struct vm_area_struct *vma,
671 struct list_head *to_kill, pgoff_t pgoff)
673 __add_to_kill(tsk, p, vma, to_kill, 0, pgoff);
677 * Collect processes when the error hit a fsdax page.
679 static void collect_procs_fsdax(struct page *page,
680 struct address_space *mapping, pgoff_t pgoff,
681 struct list_head *to_kill, bool pre_remove)
683 struct vm_area_struct *vma;
684 struct task_struct *tsk;
686 i_mmap_lock_read(mapping);
688 for_each_process(tsk) {
689 struct task_struct *t = tsk;
692 * Search all tasks while MF_MEM_PRE_REMOVE is set, because
693 * the current task may not be the one accessing the fsdax page.
694 * Otherwise, search only for the current task.
697 t = task_early_kill(tsk, true);
700 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
701 if (vma->vm_mm == t->mm)
702 add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
706 i_mmap_unlock_read(mapping);
708 #endif /* CONFIG_FS_DAX */
711 * Collect the processes who have the corrupted page mapped to kill.
713 static void collect_procs(struct folio *folio, struct page *page,
714 struct list_head *tokill, int force_early)
718 if (unlikely(PageKsm(page)))
719 collect_procs_ksm(page, tokill, force_early);
720 else if (PageAnon(page))
721 collect_procs_anon(folio, page, tokill, force_early);
723 collect_procs_file(folio, page, tokill, force_early);
726 struct hwpoison_walk {
732 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
735 tk->size_shift = shift;
738 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
739 unsigned long poisoned_pfn, struct to_kill *tk)
741 unsigned long pfn = 0;
743 if (pte_present(pte)) {
746 swp_entry_t swp = pte_to_swp_entry(pte);
748 if (is_hwpoison_entry(swp))
749 pfn = swp_offset_pfn(swp);
752 if (!pfn || pfn != poisoned_pfn)
755 set_to_kill(tk, addr, shift);
759 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
760 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
761 struct hwpoison_walk *hwp)
765 unsigned long hwpoison_vaddr;
767 if (!pmd_present(pmd))
770 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
771 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
772 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
778 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
779 struct hwpoison_walk *hwp)
785 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
786 unsigned long end, struct mm_walk *walk)
788 struct hwpoison_walk *hwp = walk->private;
790 pte_t *ptep, *mapped_pte;
793 ptl = pmd_trans_huge_lock(pmdp, walk->vma);
795 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
800 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
805 for (; addr != end; ptep++, addr += PAGE_SIZE) {
806 ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
811 pte_unmap_unlock(mapped_pte, ptl);
817 #ifdef CONFIG_HUGETLB_PAGE
818 static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
819 unsigned long addr, unsigned long end,
820 struct mm_walk *walk)
822 struct hwpoison_walk *hwp = walk->private;
823 pte_t pte = huge_ptep_get(ptep);
824 struct hstate *h = hstate_vma(walk->vma);
826 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
830 #define hwpoison_hugetlb_range NULL
833 static const struct mm_walk_ops hwpoison_walk_ops = {
834 .pmd_entry = hwpoison_pte_range,
835 .hugetlb_entry = hwpoison_hugetlb_range,
836 .walk_lock = PGWALK_RDLOCK,
840 * Sends SIGBUS to the current process with error info.
842 * This function is intended to handle "Action Required" MCEs on already
843 * hardware poisoned pages. They could happen, for example, when
844 * memory_failure() failed to unmap the error page at the first call, or
845 * when multiple local machine checks happened on different CPUs.
847 * The MCE handler currently has no easy access to the error virtual address,
848 * so this function walks the page tables to find it. The returned virtual address
849 * is correct in most cases, but it could be wrong when the application
850 * process has multiple entries mapping the error page.
852 static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
856 struct hwpoison_walk priv = {
864 mmap_read_lock(p->mm);
865 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
867 if (ret == 1 && priv.tk.addr)
868 kill_proc(&priv.tk, pfn, flags);
871 mmap_read_unlock(p->mm);
872 return ret > 0 ? -EHWPOISON : -EFAULT;
875 static const char *action_name[] = {
876 [MF_IGNORED] = "Ignored",
877 [MF_FAILED] = "Failed",
878 [MF_DELAYED] = "Delayed",
879 [MF_RECOVERED] = "Recovered",
882 static const char * const action_page_types[] = {
883 [MF_MSG_KERNEL] = "reserved kernel page",
884 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
885 [MF_MSG_SLAB] = "kernel slab page",
886 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
887 [MF_MSG_HUGE] = "huge page",
888 [MF_MSG_FREE_HUGE] = "free huge page",
889 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
890 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
891 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
892 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
893 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
894 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
895 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
896 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
897 [MF_MSG_CLEAN_LRU] = "clean LRU page",
898 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
899 [MF_MSG_BUDDY] = "free buddy page",
900 [MF_MSG_DAX] = "dax page",
901 [MF_MSG_UNSPLIT_THP] = "unsplit thp",
902 [MF_MSG_UNKNOWN] = "unknown page",
906 * XXX: It is possible that a page is isolated from the LRU cache
907 * and then kept in the swap cache or fails to be removed from the page cache.
908 * The page count will stop it from being freed by unpoison.
909 * Stress tests should be aware of this memory leak problem.
911 static int delete_from_lru_cache(struct folio *folio)
913 if (folio_isolate_lru(folio)) {
915 * Clear sensitive page flags, so that the buddy system won't
916 * complain when the folio is later unpoisoned and freed.
918 folio_clear_active(folio);
919 folio_clear_unevictable(folio);
922 * Poisoned page might never drop its ref count to 0 so we have
923 * to uncharge it manually from its memcg.
925 mem_cgroup_uncharge(folio);
928 * drop the refcount elevated by folio_isolate_lru()
936 static int truncate_error_folio(struct folio *folio, unsigned long pfn,
937 struct address_space *mapping)
941 if (mapping->a_ops->error_remove_folio) {
942 int err = mapping->a_ops->error_remove_folio(mapping, folio);
945 pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
946 else if (!filemap_release_folio(folio, GFP_NOIO))
947 pr_info("%#lx: failed to release buffers\n", pfn);
952 * If the file system doesn't support it, just invalidate.
953 * This fails on dirty pages or anything with private data.
955 if (mapping_evict_folio(mapping, folio))
958 pr_info("%#lx: Failed to invalidate\n", pfn);
967 enum mf_action_page_type type;
969 /* Callback ->action() has to unlock the relevant page inside it. */
970 int (*action)(struct page_state *ps, struct page *p);
974 * Return true if page is still referenced by others, otherwise return
977 * The extra_pins is true when one extra refcount is expected.
979 static bool has_extra_refcount(struct page_state *ps, struct page *p,
982 int count = page_count(p) - 1;
988 pr_err("%#lx: %s still referenced by %d users\n",
989 page_to_pfn(p), action_page_types[ps->type], count);
997 * Error hit kernel page.
998 * Do nothing, try to be lucky and not touch this instead. For a few cases we
999 * could be more sophisticated.
1001 static int me_kernel(struct page_state *ps, struct page *p)
1008 * Page in unknown state. Do nothing.
1010 static int me_unknown(struct page_state *ps, struct page *p)
1012 pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
1018 * Clean (or cleaned) page cache page.
1020 static int me_pagecache_clean(struct page_state *ps, struct page *p)
1022 struct folio *folio = page_folio(p);
1024 struct address_space *mapping;
1027 delete_from_lru_cache(folio);
1030 * For anonymous folios the only reference left
1031 * should be the one memory_failure() itself holds.
1033 if (folio_test_anon(folio)) {
1039 * Now truncate the page in the page cache. This is really
1040 * more like a "temporary hole punch"
1041 * Don't do this for block devices when someone else
1042 * has a reference, because it could be file system metadata
1043 * and that's not safe to truncate.
1045 mapping = folio_mapping(folio);
1047 /* Folio has been torn down in the meantime */
1053 * The shmem page is kept in the page cache instead of being truncated,
1054 * so it is expected to have an extra refcount after error handling.
1056 extra_pins = shmem_mapping(mapping);
1059 * Truncation is a bit tricky. Enable it per file system for now.
1061 * Open: to take i_rwsem or not for this? Right now we don't.
1063 ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
1064 if (has_extra_refcount(ps, p, extra_pins))
1068 folio_unlock(folio);
1074 * Dirty pagecache page
1075 * Issues: when the error hits a hole page the error is not properly propagated.
1078 static int me_pagecache_dirty(struct page_state *ps, struct page *p)
1080 struct address_space *mapping = page_mapping(p);
1083 /* TBD: print more information about the file. */
1086 * IO error will be reported by write(), fsync(), etc.
1087 * who check the mapping.
1088 * This way the application knows that something went
1089 * wrong with its dirty file data.
1091 * There's one open issue:
1093 * The EIO will be only reported on the next IO
1094 * operation and then cleared through the IO map.
1095 * Normally Linux has two mechanisms to pass IO error
1096 * first through the AS_EIO flag in the address space
1097 * and then through the PageError flag in the page.
1098 * Since we drop pages on memory failure handling the
1099 * only mechanism open to use is through AS_EIO.
1101 * This has the disadvantage that it gets cleared on
1102 * the first operation that returns an error, while
1103 * the PageError bit is more sticky and only cleared
1104 * when the page is reread or dropped. If an
1105 * application assumes it will always get an error on
1106 * fsync, but does other operations on the fd before,
1107 * and the page is dropped in between, then the error
1108 * will not be properly reported.
1110 * This can already happen even without hwpoisoned
1111 * pages: first on metadata IO errors (which only
1112 * report through AS_EIO) or when the page is dropped
1113 * at the wrong time.
1115 * So right now we assume that the application does the right thing on
1116 * the first EIO, but we're not worse than other parts of the kernel.
1119 mapping_set_error(mapping, -EIO);
1122 return me_pagecache_clean(ps, p);
1126 * Clean and dirty swap cache.
1128 * A dirty swap cache page is tricky to handle. The page could live both in the page
1129 * cache and the swap cache (i.e. the page was freshly swapped in). So it could be
1130 * referenced concurrently by 2 types of PTEs:
1131 * normal PTEs and swap PTEs. We try to handle them consistently by calling
1132 * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
1134 * - clear dirty bit to prevent IO
1136 * - but keep in the swap cache, so that when we return to it on
1137 * a later page fault, we know the application is accessing
1138 * corrupted data and shall be killed (we installed simple
1139 * interception code in do_swap_page to catch it).
1141 * Clean swap cache pages can be directly isolated. A later page fault will
1142 * bring in the known good data from disk.
1144 static int me_swapcache_dirty(struct page_state *ps, struct page *p)
1146 struct folio *folio = page_folio(p);
1148 bool extra_pins = false;
1150 folio_clear_dirty(folio);
1151 /* Trigger EIO in shmem: */
1152 folio_clear_uptodate(folio);
1154 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
1155 folio_unlock(folio);
1157 if (ret == MF_DELAYED)
1160 if (has_extra_refcount(ps, p, extra_pins))
1166 static int me_swapcache_clean(struct page_state *ps, struct page *p)
1168 struct folio *folio = page_folio(p);
1171 delete_from_swap_cache(folio);
1173 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
1174 folio_unlock(folio);
1176 if (has_extra_refcount(ps, p, false))
1183 * Huge pages. Needs work.
1185 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
1186 * To narrow down kill region to one page, we need to break up pmd.
1188 static int me_huge_page(struct page_state *ps, struct page *p)
1190 struct folio *folio = page_folio(p);
1192 struct address_space *mapping;
1193 bool extra_pins = false;
1195 mapping = folio_mapping(folio);
1197 res = truncate_error_folio(folio, page_to_pfn(p), mapping);
1198 /* The page is kept in page cache. */
1200 folio_unlock(folio);
1202 folio_unlock(folio);
1204 * The migration entry prevents later access to the error hugepage,
1205 * so we can free and dissolve it into the buddy allocator to save the healthy subpages.
1209 if (__page_handle_poison(p) >= 0) {
1217 if (has_extra_refcount(ps, p, extra_pins))
1224 * Various page states we can handle.
1226 * A page state is defined by its current page->flags bits.
1227 * The table matches them in order and calls the right handler.
1229 * This is quite tricky because we can access a page at any time
1230 * in its life cycle, so all accesses have to be extremely careful.
1232 * This is not complete. More states could be added.
1233 * For any missing state don't attempt recovery.
1236 #define dirty (1UL << PG_dirty)
1237 #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
1238 #define unevict (1UL << PG_unevictable)
1239 #define mlock (1UL << PG_mlocked)
1240 #define lru (1UL << PG_lru)
1241 #define head (1UL << PG_head)
1242 #define slab (1UL << PG_slab)
1243 #define reserved (1UL << PG_reserved)
1245 static struct page_state error_states[] = {
1246 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
1248 * free pages are specially detected outside this table:
1249 * PG_buddy pages only make a small fraction of all free pages.
1253 * Could in theory check if slab page is free or if we can drop
1254 * currently unused objects without touching them. But just
1255 * treat it as a standard kernel page for now.
1257 { slab, slab, MF_MSG_SLAB, me_kernel },
1259 { head, head, MF_MSG_HUGE, me_huge_page },
1261 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
1262 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
1264 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
1265 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
1267 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
1268 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
1270 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
1271 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
1274 * Catchall entry: must be at end.
1276 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
1288 static void update_per_node_mf_stats(unsigned long pfn,
1289 enum mf_result result)
1291 int nid = MAX_NUMNODES;
1292 struct memory_failure_stats *mf_stats = NULL;
1294 nid = pfn_to_nid(pfn);
1295 if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) {
1296 WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
1300 mf_stats = &NODE_DATA(nid)->mf_stats;
1303 ++mf_stats->ignored;
1309 ++mf_stats->delayed;
1312 ++mf_stats->recovered;
1315 WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result);
1322 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1323 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1325 static int action_result(unsigned long pfn, enum mf_action_page_type type,
1326 enum mf_result result)
1328 trace_memory_failure_event(pfn, type, result);
1330 num_poisoned_pages_inc(pfn);
1332 update_per_node_mf_stats(pfn, result);
1334 pr_err("%#lx: recovery action for %s: %s\n",
1335 pfn, action_page_types[type], action_name[result]);
1337 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
1340 static int page_action(struct page_state *ps, struct page *p,
1345 /* page p should be unlocked after returning from ps->action(). */
1346 result = ps->action(ps, p);
1348 /* Could do more checks here if page looks ok */
1350 * Could adjust zone counters here to correct for the missing page.
1353 return action_result(pfn, ps->type, result);
1356 static inline bool PageHWPoisonTakenOff(struct page *page)
1358 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
1361 void SetPageHWPoisonTakenOff(struct page *page)
1363 set_page_private(page, MAGIC_HWPOISON);
1366 void ClearPageHWPoisonTakenOff(struct page *page)
1368 if (PageHWPoison(page))
1369 set_page_private(page, 0);
1373 * Return true if the page type of a given page is supported by the hwpoison
1374 * mechanism (though handling could still fail), otherwise false. This function
1375 * does not return true for hugetlb or device memory pages, so it's assumed
1376 * to be called only in a context where we never have such pages.
1378 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
1380 /* Soft offline could migrate non-LRU movable pages */
1381 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1384 return PageLRU(page) || is_free_buddy_page(page);
1387 static int __get_hwpoison_page(struct page *page, unsigned long flags)
1389 struct folio *folio = page_folio(page);
1391 bool hugetlb = false;
1393 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false);
1395 /* Make sure hugetlb demotion did not happen from under us. */
1396 if (folio == page_folio(page))
1400 folio = page_folio(page);
1405 * This check prevents calling folio_try_get() for any
1406 * unsupported type of folio in order to reduce the risk of unexpected
1407 * races caused by taking a folio refcount.
1409 if (!HWPoisonHandlable(&folio->page, flags))
1412 if (folio_try_get(folio)) {
1413 if (folio == page_folio(page))
1416 pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
1423 static int get_any_page(struct page *p, unsigned long flags)
1425 int ret = 0, pass = 0;
1426 bool count_increased = false;
1428 if (flags & MF_COUNT_INCREASED)
1429 count_increased = true;
1432 if (!count_increased) {
1433 ret = __get_hwpoison_page(p, flags);
1435 if (page_count(p)) {
1436 /* We raced with an allocation, retry. */
1440 } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1441 /* We raced with put_page, retry. */
1447 } else if (ret == -EBUSY) {
1449 * We raced with a (possibly temporarily) unhandlable page, retry.
1461 if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
1465 * A page we cannot handle. Check whether we can turn
1466 * it into something we can handle.
1471 count_increased = false;
1479 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
1484 static int __get_unpoison_page(struct page *page)
1486 struct folio *folio = page_folio(page);
1488 bool hugetlb = false;
1490 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true);
1492 /* Make sure hugetlb demotion did not happen from under us. */
1493 if (folio == page_folio(page))
1500 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
1501 * but also isolated from the buddy freelist, so we need to identify that
1502 * state and cancel both operations to unpoison.
1504 if (PageHWPoisonTakenOff(page))
1507 return get_page_unless_zero(page) ? 1 : 0;
1511 * get_hwpoison_page() - Get refcount for memory error handling
1512 * @p: Raw error page (hit by memory error)
1513 * @flags: Flags controlling behavior of error handling
1515 * get_hwpoison_page() takes a page refcount on an error page in order to handle the memory
1516 * error on it, after checking that the error page is in a well-defined state
1517 * (defined as a page type on which we can successfully handle the memory error,
1518 * such as an LRU page or a hugetlb page).
1520 * Memory error handling could be triggered at any time on any type of page,
1521 * so it's prone to race with typical memory management lifecycle (like
1522 * allocation and free). So to avoid such races, get_hwpoison_page() takes
1523 * extra care for the error page's state (as done in __get_hwpoison_page()),
1524 * and has some retry logic in get_any_page().
1526 * When called from unpoison_memory(), the caller should already ensure that
1527 * the given page has PG_hwpoison. So it's never reused for other page
1528 * allocations, and __get_unpoison_page() never races with them.
1530 * Return: 0 on failure,
1531 * 1 on success for in-use pages in a well-defined state,
1532 * -EIO for pages on which we cannot handle memory errors,
1533 * -EBUSY when get_hwpoison_page() has raced with page lifecycle
1534 * operations like allocation and free,
1535 * -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1537 static int get_hwpoison_page(struct page *p, unsigned long flags)
1541 zone_pcp_disable(page_zone(p));
1542 if (flags & MF_UNPOISON)
1543 ret = __get_unpoison_page(p);
1545 ret = get_any_page(p, flags);
1546 zone_pcp_enable(page_zone(p));
1552 * Do all that is necessary to remove user space mappings. Unmap
1553 * the pages and send SIGBUS to the processes if the data was dirty.
1555 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1556 int flags, struct page *hpage)
1558 struct folio *folio = page_folio(hpage);
1559 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
1560 struct address_space *mapping;
1564 bool mlocked = PageMlocked(hpage);
1567 * Here we are interested only in user-mapped pages, so skip any
1568 * other types of pages.
1570 if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
1572 if (!(PageLRU(hpage) || PageHuge(p)))
1576 * This check implies we don't kill processes if their pages
1577 * are in the swap cache early. Those are always late kills.
1579 if (!page_mapped(p))
1582 if (PageSwapCache(p)) {
1583 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1584 ttu &= ~TTU_HWPOISON;
1588 * Propagate the dirty bit from PTEs to struct page first, because we
1589 * need this to decide if we should kill or just drop the page.
1590 * XXX: the dirty test could be racy: set_page_dirty() may not always
1591 * be called inside page lock (it's recommended but not enforced).
1593 mapping = page_mapping(hpage);
1594 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1595 mapping_can_writeback(mapping)) {
1596 if (page_mkclean(hpage)) {
1597 SetPageDirty(hpage);
1599 ttu &= ~TTU_HWPOISON;
1600 pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
1606 * First collect all the processes that have the page
1607 * mapped in dirty form. This has to be done before try_to_unmap,
1608 * because ttu takes the rmap data structures down.
1610 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
1612 if (PageHuge(hpage) && !PageAnon(hpage)) {
1614 * For hugetlb pages in shared mappings, try_to_unmap
1615 * could potentially call huge_pmd_unshare. Because of
1616 * this, take semaphore in write mode here and set
1617 * TTU_RMAP_LOCKED to indicate we have taken the lock
1618 * at this higher level.
1620 mapping = hugetlb_page_mapping_lock_write(hpage);
1622 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
1623 i_mmap_unlock_write(mapping);
1625 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1627 try_to_unmap(folio, ttu);
1630 unmap_success = !page_mapped(p);
1632 pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
1633 pfn, page_mapcount(p));
1636 * try_to_unmap() might put mlocked page in lru cache, so call
1637 * shake_page() again to ensure that it's flushed.
1643 * Now that the dirty bit has been propagated to the
1644 * struct page and all unmaps done we can decide if
1645 * killing is needed or not. Only kill when the page
1646 * was dirty or the process is not restartable,
1647 * otherwise the tokill list is merely
1648 * freed. When there was a problem unmapping earlier,
1649 * use a more forceful, uncatchable kill to prevent
1650 * any accesses to the poisoned memory.
1652 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
1654 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1656 return unmap_success;
1659 static int identify_page_state(unsigned long pfn, struct page *p,
1660 unsigned long page_flags)
1662 struct page_state *ps;
1665 * The first check uses the current page flags which may not have any
1666 * relevant information. The second check with the saved page flags is
1667 * carried out only if the first check can't determine the page status.
1669 for (ps = error_states;; ps++)
1670 if ((p->flags & ps->mask) == ps->res)
1673 page_flags |= (p->flags & (1UL << PG_dirty));
1676 for (ps = error_states;; ps++)
1677 if ((page_flags & ps->mask) == ps->res)
1679 return page_action(ps, p, pfn);
1682 static int try_to_split_thp_page(struct page *page)
1687 ret = split_huge_page(page);
1696 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1697 struct address_space *mapping, pgoff_t index, int flags)
1700 unsigned long size = 0;
1702 list_for_each_entry(tk, to_kill, nd)
1704 size = max(size, 1UL << tk->size_shift);
1708 * Unmap the largest mapping to avoid breaking up device-dax
1709 * mappings which are constant size. The actual size of the
1710 * mapping being torn down is communicated in siginfo, see kill_proc().
1713 loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
1715 unmap_mapping_range(mapping, start, size, 0);
1718 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1722 * Only dev_pagemap pages get here, such as fsdax when the filesystem
1723 * either does not claim or fails to claim a hwpoison event, or devdax.
1724 * The fsdax pages are initialized per base page, and the devdax pages
1725 * could be initialized either as base pages, or as compound pages with
1726 * vmemmap optimization enabled. Devdax is simplistic in its dealing with
1727 * hwpoison, such that, if a subpage of a compound page is poisoned,
1728 * simply marking the compound head page is by far sufficient.
1730 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1731 struct dev_pagemap *pgmap)
1733 struct folio *folio = pfn_folio(pfn);
1739 * Prevent the inode from being freed while we are interrogating
1740 * the address_space, typically this would be handled by
1741 * lock_page(), but dax pages do not use the page lock. This
1742 * also prevents changes to the mapping of this pfn until
1743 * poison signaling is complete.
1745 cookie = dax_lock_folio(folio);
1749 if (hwpoison_filter(&folio->page)) {
1754 switch (pgmap->type) {
1755 case MEMORY_DEVICE_PRIVATE:
1756 case MEMORY_DEVICE_COHERENT:
1758 * TODO: Handle device pages which may need coordination
1759 * with device-side memory.
1768 * Use this flag as an indication that the dax page has been
1769 * remapped UC to prevent speculative consumption of poison.
1771 SetPageHWPoison(&folio->page);
1774 * Unlike System-RAM there is no possibility to swap in a
1775 * different physical page at a given virtual address, so all
1776 * userspace consumption of ZONE_DEVICE memory necessitates
1777 * SIGBUS (i.e. MF_MUST_KILL)
1779 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1780 collect_procs(folio, &folio->page, &to_kill, true);
1782 unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
1784 dax_unlock_folio(folio, cookie);
1788 #ifdef CONFIG_FS_DAX
1790 * mf_dax_kill_procs - Collect and kill processes who are using this file range
1791 * @mapping: address_space of the file in use
1792 * @index: start pgoff of the range within the file
1793 * @count: length of the range, in unit of PAGE_SIZE
1794 * @mf_flags: memory failure flags
1796 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
1797 unsigned long count, int mf_flags)
1802 size_t end = index + count;
1803 bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE;
1805 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1807 for (; index < end; index++) {
1809 cookie = dax_lock_mapping_entry(mapping, index, &page);
1816 SetPageHWPoison(page);
1819 * The pre_remove case is revoking access, the memory is still
1820 * good and could theoretically be put back into service.
1822 collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove);
1823 unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1826 dax_unlock_mapping_entry(mapping, index, cookie);
1830 EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
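/*
 * A filesystem acting as a dax holder that claims memory_failure events is
 * expected to translate the failed device range into (mapping, pgoff, count)
 * tuples and call mf_dax_kill_procs() for each affected file range. Hedged
 * sketch only; my_fs_notify_file_range() and the way the range is resolved
 * are hypothetical, not a real API:
 *
 *	static int my_fs_notify_file_range(struct address_space *mapping,
 *					   pgoff_t index, unsigned long count,
 *					   int mf_flags)
 *	{
 *		// one call per file range resolved by the fs reverse mapping
 *		return mf_dax_kill_procs(mapping, index, count, mf_flags);
 *	}
 */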
1831 #endif /* CONFIG_FS_DAX */
1833 #ifdef CONFIG_HUGETLB_PAGE
1836 * Struct raw_hwp_page represents information about a "raw error page",
1837 * forming a singly linked list hanging off the folio's ->_hugetlb_hwpoison field.
1839 struct raw_hwp_page {
1840 struct llist_node node;
1844 static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
1846 return (struct llist_head *)&folio->_hugetlb_hwpoison;
1849 bool is_raw_hwpoison_page_in_hugepage(struct page *page)
1851 struct llist_head *raw_hwp_head;
1852 struct raw_hwp_page *p;
1853 struct folio *folio = page_folio(page);
1856 if (!folio_test_hwpoison(folio))
1859 if (!folio_test_hugetlb(folio))
1860 return PageHWPoison(page);
1863 * When RawHwpUnreliable is set, the kernel has lost track of which subpages
1864 * are HWPOISON. So return as if ALL subpages are HWPOISONed.
1866 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1869 mutex_lock(&mf_mutex);
1871 raw_hwp_head = raw_hwp_list_head(folio);
1872 llist_for_each_entry(p, raw_hwp_head->first, node) {
1873 if (page == p->page) {
1879 mutex_unlock(&mf_mutex);
1884 static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
1886 struct llist_node *head;
1887 struct raw_hwp_page *p, *next;
1888 unsigned long count = 0;
1890 head = llist_del_all(raw_hwp_list_head(folio));
1891 llist_for_each_entry_safe(p, next, head, node) {
1893 SetPageHWPoison(p->page);
1895 num_poisoned_pages_sub(page_to_pfn(p->page), 1);
1902 static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
1904 struct llist_head *head;
1905 struct raw_hwp_page *raw_hwp;
1906 struct raw_hwp_page *p, *next;
1907 int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;
1910 * Once the hwpoison hugepage has lost reliable raw error info,
1911 * there is little point in keeping additional error info precisely,
1912 * so skip adding additional raw error info.
1914 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1916 head = raw_hwp_list_head(folio);
1917 llist_for_each_entry_safe(p, next, head->first, node) {
1918 if (p->page == page)
1922 raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
1924 raw_hwp->page = page;
1925 llist_add(&raw_hwp->node, head);
1926 /* the first error event will be counted in action_result(). */
1928 num_poisoned_pages_inc(page_to_pfn(page));
1931 * Failed to save raw error info. We no longer trace all
1932 * hwpoisoned subpages, so we must refuse to free/dissolve
1933 * this hwpoisoned hugepage.
1935 folio_set_hugetlb_raw_hwp_unreliable(folio);
1937 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
1938 * used any more, so free it.
1940 __folio_free_raw_hwp(folio, false);
1945 static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
1948 * hugetlb_vmemmap_optimized hugepages can't be freed because struct
1949 * pages for tail pages are required but they don't exist.
1951 if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
1955 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by definition.
1958 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1961 return __folio_free_raw_hwp(folio, move_flag);
1964 void folio_clear_hugetlb_hwpoison(struct folio *folio)
1966 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1968 if (folio_test_hugetlb_vmemmap_optimized(folio))
1970 folio_clear_hwpoison(folio);
1971 folio_free_raw_hwp(folio, true);
1975 * Called from hugetlb code with hugetlb_lock held.
1979 * 1 - in-use hugepage
1980 * 2 - not a hugepage
1981 * -EBUSY - the hugepage is busy (try to retry)
1982 * -EHWPOISON - the hugepage is already hwpoisoned
1984 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
1985 bool *migratable_cleared)
1987 struct page *page = pfn_to_page(pfn);
1988 struct folio *folio = page_folio(page);
1989 int ret = 2; /* fallback to normal page handling */
1990 bool count_increased = false;
1992 if (!folio_test_hugetlb(folio))
1995 if (flags & MF_COUNT_INCREASED) {
1997 count_increased = true;
1998 } else if (folio_test_hugetlb_freed(folio)) {
2000 } else if (folio_test_hugetlb_migratable(folio)) {
2001 ret = folio_try_get(folio);
2003 count_increased = true;
2006 if (!(flags & MF_NO_RETRY))
2010 if (folio_set_hugetlb_hwpoison(folio, page)) {
2016 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them
2017 * from being migrated by memory hotremove.
2019 if (count_increased && folio_test_hugetlb_migratable(folio)) {
2020 folio_clear_hugetlb_migratable(folio);
2021 *migratable_cleared = true;
2026 if (count_increased)
2032 * Taking refcount of hugetlb pages needs extra care about race conditions
2033 * with basic operations like hugepage allocation/free/demotion.
2034 * So some of the prechecks for hwpoison (pinning, and testing/setting
2035 * PageHWPoison) should be done within a single hugetlb_lock section.
2037 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2040 struct page *p = pfn_to_page(pfn);
2041 struct folio *folio;
2042 unsigned long page_flags;
2043 bool migratable_cleared = false;
2047 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
2048 if (res == 2) { /* fallback to normal page handling */
2051 } else if (res == -EHWPOISON) {
2052 pr_err("%#lx: already hardware poisoned\n", pfn);
2053 if (flags & MF_ACTION_REQUIRED) {
2054 folio = page_folio(p);
2055 res = kill_accessing_process(current, folio_pfn(folio), flags);
2058 } else if (res == -EBUSY) {
2059 if (!(flags & MF_NO_RETRY)) {
2060 flags |= MF_NO_RETRY;
2063 return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2066 folio = page_folio(p);
2069 if (hwpoison_filter(p)) {
2070 folio_clear_hugetlb_hwpoison(folio);
2071 if (migratable_cleared)
2072 folio_set_hugetlb_migratable(folio);
2073 folio_unlock(folio);
2080 * Handling free hugepage. The possible race with hugepage allocation
2081 * or demotion can be prevented by PageHWPoison flag.
2084 folio_unlock(folio);
2085 if (__page_handle_poison(p) >= 0) {
2091 return action_result(pfn, MF_MSG_FREE_HUGE, res);
2094 page_flags = folio->flags;
2096 if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
2097 folio_unlock(folio);
2098 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2101 return identify_page_state(pfn, p, page_flags);
2105 static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2110 static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
2114 #endif /* CONFIG_HUGETLB_PAGE */
2116 /* Drop the extra refcount in case we come from madvise() */
2117 static void put_ref_page(unsigned long pfn, int flags)
2121 if (!(flags & MF_COUNT_INCREASED))
2124 page = pfn_to_page(pfn);
2129 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
2130 struct dev_pagemap *pgmap)
2134 /* device metadata space is not recoverable */
2135 if (!pgmap_pfn_valid(pgmap, pfn))
2139 * Call driver's implementation to handle the memory failure, otherwise
2140 * fall back to generic handler.
2142 if (pgmap_has_memory_failure(pgmap)) {
2143 rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
2145 * Fall back to generic handler too if operation is not
2146 * supported inside the driver/device/filesystem.
2148 if (rc != -EOPNOTSUPP)
2152 rc = mf_generic_kill_procs(pfn, flags, pgmap);
2154 /* drop pgmap ref acquired in caller */
2155 put_dev_pagemap(pgmap);
2156 if (rc != -EOPNOTSUPP)
2157 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
2162 * memory_failure - Handle memory failure of a page.
2163 * @pfn: Page Number of the corrupted page
2164 * @flags: fine tune action taken
2166 * This function is called by the low level machine check code
2167 * of an architecture when it detects hardware memory corruption
2168 * of a page. It tries its best to recover, which includes
2169 * dropping pages, killing processes etc.
2171 * The function is primarily of use for corruptions that
2172 * happen outside the current execution context (e.g. when
2173 * detected by a background scrubber)
2175 * Must run in process context (e.g. a work queue) with interrupts
2176 * enabled and no spinlocks held.
2178 * Return: 0 for successfully handled the memory error,
2179 * -EOPNOTSUPP for hwpoison_filter() filtered the error event,
2180 * < 0(except -EOPNOTSUPP) on failure.
2182 int memory_failure(unsigned long pfn, int flags)
2186 struct dev_pagemap *pgmap;
2188 unsigned long page_flags;
2192 if (!sysctl_memory_failure_recovery)
2193 panic("Memory failure on page %lx", pfn);
2195 mutex_lock(&mf_mutex);
2197 if (!(flags & MF_SW_SIMULATED))
2198 hw_memory_failure = true;
2200 p = pfn_to_online_page(pfn);
2202 res = arch_memory_failure(pfn, flags);
2206 if (pfn_valid(pfn)) {
2207 pgmap = get_dev_pagemap(pfn, NULL);
2208 put_ref_page(pfn, flags);
2210 res = memory_failure_dev_pagemap(pfn, flags,
2215 pr_err("%#lx: memory outside kernel control\n", pfn);
2221 res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
2225 if (TestSetPageHWPoison(p)) {
2226 pr_err("%#lx: already hardware poisoned\n", pfn);
2228 if (flags & MF_ACTION_REQUIRED)
2229 res = kill_accessing_process(current, pfn, flags);
2230 if (flags & MF_COUNT_INCREASED)
2236 * There is nothing we need to, or can, do about count=0 pages.
2237 * 1) it's a free page, and therefore in safe hands:
2238 * check_new_page() will be the gate keeper.
2239 * 2) it's part of a non-compound high order page.
2240 * This implies some kernel user: we cannot stop them from
2241 * reading/writing the page; let's pray that the page has been
2242 * used and will be freed some time later.
2243 * In fact it's dangerous to directly bump up page count from 0,
2244 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
2246 if (!(flags & MF_COUNT_INCREASED)) {
2247 res = get_hwpoison_page(p, flags);
2249 if (is_free_buddy_page(p)) {
2250 if (take_page_off_buddy(p)) {
2254 /* We lost the race, try again */
2256 ClearPageHWPoison(p);
2262 res = action_result(pfn, MF_MSG_BUDDY, res);
2264 res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
2267 } else if (res < 0) {
2268 res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2273 hpage = compound_head(p);
2274 if (PageTransHuge(hpage)) {
2276 * The flag must be set after the refcount is bumped
2277 * otherwise it may race with THP split.
2278 * And the flag can't be set in get_hwpoison_page() since
2279 * it is called by soft offline too and it is just called
2280 * for !MF_COUNT_INCREASED. So here seems to be the best place.
2283 * No need to care about the above error handling paths for
2284 * get_hwpoison_page() since they handle either free page
2285 * or unhandlable page. The refcount is bumped iff the
2286 * page is a valid handlable page.
2288 SetPageHasHWPoisoned(hpage);
2289 if (try_to_split_thp_page(p) < 0) {
2290 res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
2293 VM_BUG_ON_PAGE(!page_count(p), p);
2297 * We ignore non-LRU pages for good reasons.
2298 * - PG_locked is only well defined for LRU pages and a few others
2299 * - to avoid races with __SetPageLocked()
2300 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
2301 * The check (unnecessarily) ignores LRU pages being isolated and
2302 * walked by the page reclaim code, however that's not a big loss.
2309 * We only intend to deal with non-compound pages here.
2310 * However, the page could have become part of a compound page due to
2311 * a race window. If this happens, we can try again to hopefully
2312 * handle the page in the next round.
2314 if (PageCompound(p)) {
2316 ClearPageHWPoison(p);
2319 flags &= ~MF_COUNT_INCREASED;
2323 res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
2328 * We use page flags to determine what action should be taken, but
2329 * the flags can be modified by the error containment action. One
2330 * example is an mlocked page, where PG_mlocked is cleared by
2331 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
2332 * status correctly, we save a copy of the page flags at this time.
2334 page_flags = p->flags;
2336 if (hwpoison_filter(p)) {
2337 ClearPageHWPoison(p);
2345 * __munlock_folio() may clear a writeback page's LRU flag without
2346 * the page lock. We need to wait for writeback completion on this page,
2347 * or it may trigger a VFS BUG while evicting the inode.
2349 if (!PageLRU(p) && !PageWriteback(p))
2350 goto identify_page_state;
2353 * It's very difficult to mess with pages currently under IO
2354 * and in many cases impossible, so we just avoid it here.
2356 wait_on_page_writeback(p);
2359 * Now take care of user space mappings.
2360 * Abort on failure: __filemap_remove_folio() assumes an unmapped page.
2362 if (!hwpoison_user_mappings(p, pfn, flags, p)) {
2363 res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2368 * Torn down by someone else?
2370 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
2371 res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
2375 identify_page_state:
2376 res = identify_page_state(pfn, p, page_flags);
2377 mutex_unlock(&mf_mutex);
2382 mutex_unlock(&mf_mutex);
2385 EXPORT_SYMBOL_GPL(memory_failure);
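/*
 * Illustrative caller sketch, not taken from this file: how a hypothetical
 * driver might report a corrupted pfn while honoring the constraints and
 * return values documented above. The example_* name is an assumption for
 * illustration only.
 *
 *	// Runs in process context (e.g. a work handler), interrupts on,
 *	// no spinlocks held.
 *	static void example_report_bad_pfn(unsigned long pfn)
 *	{
 *		int ret = memory_failure(pfn, 0);
 *
 *		if (ret == -EOPNOTSUPP)
 *			pr_debug("pfn %#lx filtered by hwpoison_filter()\n", pfn);
 *		else if (ret < 0)
 *			pr_warn("recovery of pfn %#lx failed: %d\n", pfn, ret);
 *	}
 */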
2387 #define MEMORY_FAILURE_FIFO_ORDER 4
2388 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
2390 struct memory_failure_entry {
2395 struct memory_failure_cpu {
2396 DECLARE_KFIFO(fifo, struct memory_failure_entry,
2397 MEMORY_FAILURE_FIFO_SIZE);
2399 struct work_struct work;
2402 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
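/*
 * Per-CPU machinery for deferring memory failure handling: producers
 * (possibly running in interrupt context) push {pfn, flags} entries into
 * the bounded kfifo under mf_cpu->lock, and memory_failure_work_func()
 * drains the fifo later from a per-CPU work item in process context.
 */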
2405 * memory_failure_queue - Schedule handling memory failure of a page.
2406 * @pfn: Page Number of the corrupted page
2407 * @flags: Flags for memory failure handling
2409 * This function is called by the low level hardware error handler
2410 * when it detects hardware memory corruption of a page. It schedules
2411 * the recovery of the error page, including dropping pages, killing processes etc.
2414 * The function is primarily of use for corruptions that
2415 * happen outside the current execution context (e.g. when
2416 * detected by a background scrubber).
2418 * Can run in IRQ context (see the illustrative caller sketch after this function).
2420 void memory_failure_queue(unsigned long pfn, int flags)
2422 struct memory_failure_cpu *mf_cpu;
2423 unsigned long proc_flags;
2424 struct memory_failure_entry entry = {
2429 mf_cpu = &get_cpu_var(memory_failure_cpu);
2430 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2431 if (kfifo_put(&mf_cpu->fifo, entry))
2432 schedule_work_on(smp_processor_id(), &mf_cpu->work);
2434 pr_err("buffer overflow when queuing memory failure at %#lx\n",
2436 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2437 put_cpu_var(memory_failure_cpu);
2439 EXPORT_SYMBOL_GPL(memory_failure_queue);
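/*
 * Illustrative sketch, not taken from this file: because memory_failure()
 * itself must not be called from IRQ context, an error handler running
 * with interrupts disabled queues the pfn instead and lets
 * memory_failure_work_func() below do the actual recovery. The example_*
 * name is an assumption for illustration only.
 *
 *	// May be called from IRQ context.
 *	static void example_report_bad_pfn_from_irq(unsigned long pfn)
 *	{
 *		memory_failure_queue(pfn, 0);
 *	}
 */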
2441 static void memory_failure_work_func(struct work_struct *work)
2443 struct memory_failure_cpu *mf_cpu;
2444 struct memory_failure_entry entry = { 0, };
2445 unsigned long proc_flags;
2448 mf_cpu = container_of(work, struct memory_failure_cpu, work);
2450 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2451 gotten = kfifo_get(&mf_cpu->fifo, &entry);
2452 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2455 if (entry.flags & MF_SOFT_OFFLINE)
2456 soft_offline_page(entry.pfn, entry.flags);
2458 memory_failure(entry.pfn, entry.flags);
2463 * Process memory_failure work queued on the specified CPU.
2464 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
2466 void memory_failure_queue_kick(int cpu)
2468 struct memory_failure_cpu *mf_cpu;
2470 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2471 cancel_work_sync(&mf_cpu->work);
2472 memory_failure_work_func(&mf_cpu->work);
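/*
 * Illustrative sketch, not taken from this file: a hypothetical arch
 * error-exit path could drain a CPU's pending entries synchronously
 * before returning to user space, so the task does not touch a page
 * whose handling is still sitting in the queue. Note that
 * memory_failure_queue_kick() may sleep (cancel_work_sync() above), so
 * this must run in process context. The example_* name is an assumption
 * for illustration only.
 *
 *	static void example_flush_pending_memory_failures(int cpu)
 *	{
 *		memory_failure_queue_kick(cpu);
 *	}
 */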
2475 static int __init memory_failure_init(void)
2477 struct memory_failure_cpu *mf_cpu;
2480 for_each_possible_cpu(cpu) {
2481 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2482 spin_lock_init(&mf_cpu->lock);
2483 INIT_KFIFO(mf_cpu->fifo);
2484 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
2487 register_sysctl_init("vm", memory_failure_table);
2491 core_initcall(memory_failure_init);
2494 #define pr_fmt(fmt) "" fmt
2495 #define unpoison_pr_info(fmt, pfn, rs) ({	\
2497 	if (__ratelimit(rs))			\
2498 		pr_info(fmt, pfn);		})
2502 * unpoison_memory - Unpoison a previously poisoned page
2503 * @pfn: Page number of the page to be unpoisoned
2505 * Software-unpoison a page that has been poisoned by
2506 * memory_failure() earlier.
2508 * This is only done at the software level, so it only works
2509 * for Linux-injected failures, not real hardware failures.
2511 * Returns 0 on success, otherwise -errno. (An illustrative caller sketch follows this function.)
2513 int unpoison_memory(unsigned long pfn)
2515 struct folio *folio;
2517 int ret = -EBUSY, ghp;
2518 unsigned long count = 1;
2520 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
2521 DEFAULT_RATELIMIT_BURST);
2523 if (!pfn_valid(pfn))
2526 p = pfn_to_page(pfn);
2527 folio = page_folio(p);
2529 mutex_lock(&mf_mutex);
2531 if (hw_memory_failure) {
2532 unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
2538 if (!PageHWPoison(p)) {
2539 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
2544 if (folio_ref_count(folio) > 1) {
2545 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
2550 if (folio_test_slab(folio) || PageTable(&folio->page) ||
2551 folio_test_reserved(folio) || PageOffline(&folio->page))
2555 * Note that folio->_mapcount is overloaded in SLAB, so the simple test
2556 * in folio_mapped() has to be done after folio_test_slab() is checked.
2558 if (folio_mapped(folio)) {
2559 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
2564 if (folio_mapping(folio)) {
2565 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
2570 ghp = get_hwpoison_page(p, MF_UNPOISON);
2574 count = folio_free_raw_hwp(folio, false);
2578 ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
2579 } else if (ghp < 0) {
2580 if (ghp == -EHWPOISON) {
2581 ret = put_page_back_buddy(p) ? 0 : -EBUSY;
2584 unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
2590 count = folio_free_raw_hwp(folio, false);
2598 if (TestClearPageHWPoison(p)) {
2605 mutex_unlock(&mf_mutex);
2608 num_poisoned_pages_sub(pfn, 1);
2609 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
2610 page_to_pfn(p), &unpoison_rs);
2614 EXPORT_SYMBOL(unpoison_memory);
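/*
 * Illustrative sketch, not taken from this file: unpoison_memory() is
 * intended for test tooling that injected the poison in software in the
 * first place (see the "Linux-injected failures" note above). A
 * hypothetical debugfs-style hook could look like:
 *
 *	static int example_unpoison_pfn(u64 pfn)
 *	{
 *		return unpoison_memory((unsigned long)pfn);
 *	}
 */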
2616 static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
2618 bool isolated = false;
2620 if (folio_test_hugetlb(folio)) {
2621 isolated = isolate_hugetlb(folio, pagelist);
2623 bool lru = !__folio_test_movable(folio);
2626 isolated = folio_isolate_lru(folio);
2628 isolated = isolate_movable_page(&folio->page,
2629 ISOLATE_UNEVICTABLE);
2632 list_add(&folio->lru, pagelist);
2634 node_stat_add_folio(folio, NR_ISOLATED_ANON +
2635 folio_is_file_lru(folio));
2640 * If we succeeded in isolating the folio, we grabbed another refcount on
2641 * the folio, so we can safely drop the one we got from get_any_page().
2642 * If we failed to isolate the folio, it means that we cannot go further
2643 * and we will return an error, so drop the reference we got from
2644 * get_any_page() as well.
2651 * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
2652 * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2653 * If the page is mapped, its contents are migrated to a new page.
2655 static int soft_offline_in_use_page(struct page *page)
2658 unsigned long pfn = page_to_pfn(page);
2659 struct folio *folio = page_folio(page);
2660 char const *msg_page[] = {"page", "hugepage"};
2661 bool huge = folio_test_hugetlb(folio);
2662 LIST_HEAD(pagelist);
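	/*
	 * Migration target policy: any node is acceptable, the allocation is
	 * a movable user-style allocation, and __GFP_RETRY_MAYFAIL lets it
	 * eventually fail under pressure rather than invoke the OOM killer.
	 */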
2663 struct migration_target_control mtc = {
2664 .nid = NUMA_NO_NODE,
2665 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
2668 if (!huge && folio_test_large(folio)) {
2669 if (try_to_split_thp_page(page)) {
2670 pr_info("soft offline: %#lx: thp split failed\n", pfn);
2673 folio = page_folio(page);
2678 folio_wait_writeback(folio);
2679 if (PageHWPoison(page)) {
2680 folio_unlock(folio);
2682 pr_info("soft offline: %#lx page already poisoned\n", pfn);
2686 if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
2688 * Try to invalidate first. This should work for
2689 * non-dirty unmapped page-cache pages.
2691 ret = mapping_evict_folio(folio_mapping(folio), folio);
2692 folio_unlock(folio);
2695 pr_info("soft_offline: %#lx: invalidated\n", pfn);
2696 page_handle_poison(page, false, true);
2700 if (mf_isolate_folio(folio, &pagelist)) {
2701 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
2702 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
2704 bool release = !huge;
2706 if (!page_handle_poison(page, huge, release))
2709 if (!list_empty(&pagelist))
2710 putback_movable_pages(&pagelist);
2712 pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
2713 pfn, msg_page[huge], ret, &page->flags);
2718 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
2719 pfn, msg_page[huge], page_count(page), &page->flags);
2726 * soft_offline_page - Soft offline a page.
2727 * @pfn: pfn to soft-offline
2728 * @flags: flags. Same as memory_failure().
2730 * Returns 0 on success,
2731 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
2732 *         < 0 (negated errno) otherwise.
2734 * Soft offline a page, by migration or invalidation,
2735 * without killing anything. This is for the case when
2736 * a page is not corrupted yet (so it's still valid to access),
2737 * but has had a number of corrected errors and is better taken out.
2740 * The actual policy on when to do that is maintained by user space.
2743 * This should never impact any application or cause data loss,
2744 * however it might take some time.
2746 * This is not a 100% solution for all memory, but tries to be
2747 * ``good enough'' for the majority of memory. (An illustrative caller sketch follows the function body below.)
2749 int soft_offline_page(unsigned long pfn, int flags)
2752 bool try_again = true;
2755 if (!pfn_valid(pfn)) {
2756 WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
2760 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
2761 page = pfn_to_online_page(pfn);
2763 put_ref_page(pfn, flags);
2767 mutex_lock(&mf_mutex);
2769 if (PageHWPoison(page)) {
2770 pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
2771 put_ref_page(pfn, flags);
2772 mutex_unlock(&mf_mutex);
2778 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
2781 if (hwpoison_filter(page)) {
2785 mutex_unlock(&mf_mutex);
2790 ret = soft_offline_in_use_page(page);
2791 } else if (ret == 0) {
2792 if (!page_handle_poison(page, true, false)) {
2795 flags &= ~MF_COUNT_INCREASED;
2802 mutex_unlock(&mf_mutex);
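/*
 * Illustrative caller sketch, not taken from this file: a hypothetical
 * corrected-error threshold handler could soft-offline a page once it has
 * seen too many corrected errors, per the policy note in the kernel-doc
 * above. The example_* name is an assumption for illustration only.
 *
 *	// Called from process context when a pfn crosses a corrected-error
 *	// threshold maintained elsewhere (e.g. by a user space RAS agent).
 *	static int example_retire_flaky_page(unsigned long pfn)
 *	{
 *		int ret = soft_offline_page(pfn, 0);
 *
 *		if (ret == -EOPNOTSUPP)
 *			return 0;	// event filtered by hwpoison_filter()
 *		return ret;		// 0 on success, negative errno otherwise
 *	}
 */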