1 // SPDX-License-Identifier: GPL-2.0-only
3 * Simple NUMA memory policy for the Linux kernel.
5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
8 * NUMA policy allows the user to give hints in which node(s) memory should be allocated.
11 * Support six policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter is used.
23 * weighted interleave  Allocate memory interleaved over a set of nodes based on
24 * a set of weights (per-node), with normal fallback if it
25 * fails. Otherwise operates the same as interleave.
26 * Example: nodeset(0,1) & weights (2,1) - 2 pages allocated
27 * on node 0 for every 1 page allocated on node 1.
29 * bind Only allocate memory on a specific set of nodes,
31 * FIXME: memory is allocated starting with the first node
32 * to the last. It would be better if bind would truly restrict
33 * the allocation to memory nodes instead
35 * preferred Try a specific node first before normal fallback.
36 * As a special case NUMA_NO_NODE here means do the allocation
37 * on the local CPU. This is normally identical to default,
38 * but useful to set in a VMA when you have a non default process policy.
41 * preferred many Try a set of nodes first before normal fallback. This is
42 * similar to preferred without the special case.
44 * default Allocate on the local node first, or when on a VMA
45 * use the process policy. This is what Linux always did
46 * in a NUMA aware kernel and still does by, ahem, default.
48 * The process policy is applied for most non-interrupt memory allocations
49 * in that process' context. Interrupts ignore the policies and always
50 * try to allocate on the local CPU. The VMA policy is only applied for memory
51 * allocations for a VMA in the VM.
53 * Currently there are a few corner cases in swapping where the policy
54 * is not applied, but the majority should be handled. When process policy
55 * is used it is not remembered over swap outs/swap ins.
57 * Only the highest zone in the zone hierarchy gets policied. Allocations
58 * requesting a lower zone just use default policy. This implies that
59 * on systems with highmem, kernel lowmem allocations don't get policied.
60 * Same with GFP_DMA allocations.
62 * For shmem/tmpfs shared memory the policy is shared between
63 * all users and remembered even when nobody has memory mapped.
67 fix mmap readahead to honour policy and enable policy for any page cache object
69 statistics for bigpages
70 global policy for page cache? currently it uses process policy. Requires first item above.
72 handle mremap for shared memory (currently ignored for the policy)
74 make bind policy root only? It can trigger oom much faster and the
75 kernel is not always graceful about that.
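/*
 * Editorial addition: a minimal userspace sketch of the policies described
 * above, using the syscall wrappers from libnuma's <numaif.h>. The node
 * numbers, mapping size and lack of error handling are assumptions for
 * illustration only; this is not part of the kernel sources.
 */
#if 0	/* illustration only */
#include <numaif.h>
#include <sys/mman.h>

static void mempolicy_example(void)
{
	unsigned long nodes = 0x3;		/* nodes 0 and 1 */
	size_t len = 1 << 20;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* process policy: interleave future allocations over nodes 0-1 */
	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);

	/* VMA policy: bind this mapping to nodes 0-1 and try to migrate
	 * any already-allocated pages that sit elsewhere */
	mbind(buf, len, MPOL_BIND, &nodes, sizeof(nodes) * 8, MPOL_MF_MOVE);
}
#endif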
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/mempolicy.h>
81 #include <linux/pagewalk.h>
82 #include <linux/highmem.h>
83 #include <linux/hugetlb.h>
84 #include <linux/kernel.h>
85 #include <linux/sched.h>
86 #include <linux/sched/mm.h>
87 #include <linux/sched/numa_balancing.h>
88 #include <linux/sched/task.h>
89 #include <linux/nodemask.h>
90 #include <linux/cpuset.h>
91 #include <linux/slab.h>
92 #include <linux/string.h>
93 #include <linux/export.h>
94 #include <linux/nsproxy.h>
95 #include <linux/interrupt.h>
96 #include <linux/init.h>
97 #include <linux/compat.h>
98 #include <linux/ptrace.h>
99 #include <linux/swap.h>
100 #include <linux/seq_file.h>
101 #include <linux/proc_fs.h>
102 #include <linux/migrate.h>
103 #include <linux/ksm.h>
104 #include <linux/rmap.h>
105 #include <linux/security.h>
106 #include <linux/syscalls.h>
107 #include <linux/ctype.h>
108 #include <linux/mm_inline.h>
109 #include <linux/mmu_notifier.h>
110 #include <linux/printk.h>
111 #include <linux/swapops.h>
113 #include <asm/tlbflush.h>
115 #include <linux/uaccess.h>
117 #include "internal.h"
120 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
121 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
122 #define MPOL_MF_WRLOCK (MPOL_MF_INTERNAL << 2) /* Write-lock walked vmas */
124 static struct kmem_cache *policy_cache;
125 static struct kmem_cache *sn_cache;
127 /* Highest zone. A specific allocation for a zone below that is not
129 enum zone_type policy_zone = 0;
132 * run-time system-wide default policy => local allocation
134 static struct mempolicy default_policy = {
135 .refcnt = ATOMIC_INIT(1), /* never free it */
139 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
142 * iw_table is the sysfs-set interleave weight table, a value of 0 denotes
143 * system-default value should be used. A NULL iw_table also denotes that
144 * system-default values should be used. Until the system-default table
145 * is implemented, the system-default is always 1.
147 * iw_table is RCU protected
149 static u8 __rcu *iw_table;
150 static DEFINE_MUTEX(iw_table_lock);
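/*
 * Editorial note: on kernels that provide this table, the per-node weights
 * are typically exposed read/write under
 * /sys/kernel/mm/mempolicy/weighted_interleave/ (e.g. "node0", "node1");
 * treat the exact path as an assumption rather than ABI documentation.
 */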
152 static u8 get_il_weight(int node)
158 table = rcu_dereference(iw_table);
159 /* if no iw_table, use system default */
160 weight = table ? table[node] : 1;
161 /* if value in iw_table is 0, use system default */
162 weight = weight ? weight : 1;
168 * numa_nearest_node - Find nearest node by state
169 * @node: Node id to start the search
170 * @state: State to filter the search
172 * Lookup the closest node by distance if @node is not in state.
174 * Return: this @node if it is in state, otherwise the closest node by distance
176 int numa_nearest_node(int node, unsigned int state)
178 int min_dist = INT_MAX, dist, n, min_node;
180 if (state >= NR_NODE_STATES)
183 if (node == NUMA_NO_NODE || node_state(node, state))
187 for_each_node_state(n, state) {
188 dist = node_distance(node, n);
189 if (dist < min_dist) {
197 EXPORT_SYMBOL_GPL(numa_nearest_node);
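/*
 * Editorial addition: hedged usage sketch. A caller that wants memory close
 * to a device whose own node may be memoryless could combine
 * numa_nearest_node() with a node-targeted allocation. dev_to_node(),
 * the GFP flags and the helper name are assumptions for illustration.
 */
#if 0	/* illustration only */
static void *alloc_near_device(struct device *dev, size_t size)
{
	int nid = numa_nearest_node(dev_to_node(dev), N_MEMORY);

	return kmalloc_node(size, GFP_KERNEL, nid);
}
#endif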
199 struct mempolicy *get_task_policy(struct task_struct *p)
201 struct mempolicy *pol = p->mempolicy;
207 node = numa_node_id();
208 if (node != NUMA_NO_NODE) {
209 pol = &preferred_node_policy[node];
210 /* preferred_node_policy is not initialised early in boot */
215 return &default_policy;
218 static const struct mempolicy_operations {
219 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
220 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
221 } mpol_ops[MPOL_MAX];
223 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
225 return pol->flags & MPOL_MODE_FLAGS;
228 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
229 const nodemask_t *rel)
232 nodes_fold(tmp, *orig, nodes_weight(*rel));
233 nodes_onto(*ret, tmp, *rel);
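/*
 * Editorial example: with MPOL_F_RELATIVE_NODES, a user nodemask of {0,2}
 * inside a cpuset that allows nodes {4,5,6} is first folded modulo the
 * three allowed nodes (still {0,2}) and then mapped onto the allowed set,
 * giving {4,6}, i.e. the 0th and 2nd allowed nodes.
 */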
236 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
238 if (nodes_empty(*nodes))
244 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
246 if (nodes_empty(*nodes))
249 nodes_clear(pol->nodes);
250 node_set(first_node(*nodes), pol->nodes);
255 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
256 * any, for the new policy. mpol_new() has already validated the nodes
257 * parameter with respect to the policy mode and flags.
259 * Must be called holding task's alloc_lock to protect task's mems_allowed
260 * and mempolicy. May also be called holding the mmap_lock for write.
262 static int mpol_set_nodemask(struct mempolicy *pol,
263 const nodemask_t *nodes, struct nodemask_scratch *nsc)
268 * Default (pol==NULL) and local memory policies are not
269 * subject to any remapping. They also do not need any special
272 if (!pol || pol->mode == MPOL_LOCAL)
276 nodes_and(nsc->mask1,
277 cpuset_current_mems_allowed, node_states[N_MEMORY]);
281 if (pol->flags & MPOL_F_RELATIVE_NODES)
282 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
284 nodes_and(nsc->mask2, *nodes, nsc->mask1);
286 if (mpol_store_user_nodemask(pol))
287 pol->w.user_nodemask = *nodes;
289 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
291 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
296 * This function just creates a new policy, does some checks and simple
297 * initialization. You must invoke mpol_set_nodemask() to set nodes.
299 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
302 struct mempolicy *policy;
304 if (mode == MPOL_DEFAULT) {
305 if (nodes && !nodes_empty(*nodes))
306 return ERR_PTR(-EINVAL);
312 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
313 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
314 * All other modes require a valid pointer to a non-empty nodemask.
316 if (mode == MPOL_PREFERRED) {
317 if (nodes_empty(*nodes)) {
318 if (((flags & MPOL_F_STATIC_NODES) ||
319 (flags & MPOL_F_RELATIVE_NODES)))
320 return ERR_PTR(-EINVAL);
324 } else if (mode == MPOL_LOCAL) {
325 if (!nodes_empty(*nodes) ||
326 (flags & MPOL_F_STATIC_NODES) ||
327 (flags & MPOL_F_RELATIVE_NODES))
328 return ERR_PTR(-EINVAL);
329 } else if (nodes_empty(*nodes))
330 return ERR_PTR(-EINVAL);
332 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
334 return ERR_PTR(-ENOMEM);
335 atomic_set(&policy->refcnt, 1);
337 policy->flags = flags;
338 policy->home_node = NUMA_NO_NODE;
343 /* Slow path of a mpol destructor. */
344 void __mpol_put(struct mempolicy *pol)
346 if (!atomic_dec_and_test(&pol->refcnt))
348 kmem_cache_free(policy_cache, pol);
351 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
355 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
359 if (pol->flags & MPOL_F_STATIC_NODES)
360 nodes_and(tmp, pol->w.user_nodemask, *nodes);
361 else if (pol->flags & MPOL_F_RELATIVE_NODES)
362 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
364 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
366 pol->w.cpuset_mems_allowed = *nodes;
369 if (nodes_empty(tmp))
375 static void mpol_rebind_preferred(struct mempolicy *pol,
376 const nodemask_t *nodes)
378 pol->w.cpuset_mems_allowed = *nodes;
382 * mpol_rebind_policy - Migrate a policy to a different set of nodes
384 * Per-vma policies are protected by mmap_lock. Allocations using per-task
385 * policies are protected by task->mems_allowed_seq to prevent a premature
386 * OOM/allocation failure due to parallel nodemask modification.
388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
390 if (!pol || pol->mode == MPOL_LOCAL)
392 if (!mpol_store_user_nodemask(pol) &&
393 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
396 mpol_ops[pol->mode].rebind(pol, newmask);
400 * Wrapper for mpol_rebind_policy() that just requires task
401 * pointer, and updates task mempolicy.
403 * Called with task's alloc_lock held.
405 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
407 mpol_rebind_policy(tsk->mempolicy, new);
411 * Rebind each vma in mm to new nodemask.
413 * Call holding a reference to mm. Takes mm->mmap_lock during call.
415 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
417 struct vm_area_struct *vma;
418 VMA_ITERATOR(vmi, mm, 0);
421 for_each_vma(vmi, vma) {
422 vma_start_write(vma);
423 mpol_rebind_policy(vma->vm_policy, new);
425 mmap_write_unlock(mm);
428 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
430 .rebind = mpol_rebind_default,
432 [MPOL_INTERLEAVE] = {
433 .create = mpol_new_nodemask,
434 .rebind = mpol_rebind_nodemask,
437 .create = mpol_new_preferred,
438 .rebind = mpol_rebind_preferred,
441 .create = mpol_new_nodemask,
442 .rebind = mpol_rebind_nodemask,
445 .rebind = mpol_rebind_default,
447 [MPOL_PREFERRED_MANY] = {
448 .create = mpol_new_nodemask,
449 .rebind = mpol_rebind_preferred,
451 [MPOL_WEIGHTED_INTERLEAVE] = {
452 .create = mpol_new_nodemask,
453 .rebind = mpol_rebind_nodemask,
457 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
458 unsigned long flags);
459 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
460 pgoff_t ilx, int *nid);
462 static bool strictly_unmovable(unsigned long flags)
465 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
466 * if any misplaced page is found.
468 return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
472 struct migration_mpol { /* for alloc_migration_target_by_mpol() */
473 struct mempolicy *pol;
478 struct list_head *pagelist;
483 struct vm_area_struct *first;
484 struct folio *large; /* note last large folio encountered */
485 long nr_failed; /* could not be isolated at this time */
489 * Check if the folio's nid is in qp->nmask.
491 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
492 * in the invert of qp->nmask.
494 static inline bool queue_folio_required(struct folio *folio,
495 struct queue_pages *qp)
497 int nid = folio_nid(folio);
498 unsigned long flags = qp->flags;
500 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
503 static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
506 struct queue_pages *qp = walk->private;
508 if (unlikely(is_pmd_migration_entry(*pmd))) {
512 folio = pmd_folio(*pmd);
513 if (is_huge_zero_folio(folio)) {
514 walk->action = ACTION_CONTINUE;
517 if (!queue_folio_required(folio, qp))
519 if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
520 !vma_migratable(walk->vma) ||
521 !migrate_folio_add(folio, qp->pagelist, qp->flags))
526 * Scan through folios, checking if they satisfy the required conditions,
527 * moving them from the LRU to a local pagelist for migration if they do.
529 * queue_folios_pte_range() has two possible return values:
530 * 0 - continue walking to scan for more, even if an existing folio on the
531 * wrong node could not be isolated and queued for migration.
532 * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
533 * and an existing folio was on a node that does not follow the policy.
535 static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
536 unsigned long end, struct mm_walk *walk)
538 struct vm_area_struct *vma = walk->vma;
540 struct queue_pages *qp = walk->private;
541 unsigned long flags = qp->flags;
542 pte_t *pte, *mapped_pte;
546 ptl = pmd_trans_huge_lock(pmd, vma);
548 queue_folios_pmd(pmd, walk);
553 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
555 walk->action = ACTION_AGAIN;
558 for (; addr != end; pte++, addr += PAGE_SIZE) {
559 ptent = ptep_get(pte);
562 if (!pte_present(ptent)) {
563 if (is_migration_entry(pte_to_swp_entry(ptent)))
567 folio = vm_normal_folio(vma, addr, ptent);
568 if (!folio || folio_is_zone_device(folio))
571 * vm_normal_folio() filters out zero pages, but there might
572 * still be reserved folios to skip, perhaps in a VDSO.
574 if (folio_test_reserved(folio))
576 if (!queue_folio_required(folio, qp))
578 if (folio_test_large(folio)) {
580 * A large folio can only be isolated from LRU once,
581 * but may be mapped by many PTEs (and Copy-On-Write may
582 * intersperse PTEs of other, order 0, folios). This is
583 * a common case, so don't mistake it for failure (but
584 * there can be other cases of multi-mapped pages which
585 * this quick check does not help to filter out - and a
586 * search of the pagelist might grow to be prohibitive).
588 * migrate_pages(&pagelist) returns nr_failed folios, so
589 * check "large" now so that queue_pages_range() returns
590 * a comparable nr_failed folios. This does imply that
591 * if folio could not be isolated for some racy reason
592 * at its first PTE, later PTEs will not give it another
593 * chance of isolation; but keeps the accounting simple.
595 if (folio == qp->large)
599 if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
600 !vma_migratable(vma) ||
601 !migrate_folio_add(folio, qp->pagelist, flags)) {
603 if (strictly_unmovable(flags))
607 pte_unmap_unlock(mapped_pte, ptl);
610 if (qp->nr_failed && strictly_unmovable(flags))
615 static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
616 unsigned long addr, unsigned long end,
617 struct mm_walk *walk)
619 #ifdef CONFIG_HUGETLB_PAGE
620 struct queue_pages *qp = walk->private;
621 unsigned long flags = qp->flags;
626 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
627 entry = huge_ptep_get(walk->mm, addr, pte);
628 if (!pte_present(entry)) {
629 if (unlikely(is_hugetlb_entry_migration(entry)))
633 folio = pfn_folio(pte_pfn(entry));
634 if (!queue_folio_required(folio, qp))
636 if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
637 !vma_migratable(walk->vma)) {
642 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
643 * Choosing not to migrate a shared folio is not counted as a failure.
645 * See folio_likely_mapped_shared() on possible imprecision when we
646 * cannot easily detect if a folio is shared.
648 if ((flags & MPOL_MF_MOVE_ALL) ||
649 (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
650 if (!isolate_hugetlb(folio, qp->pagelist))
654 if (qp->nr_failed && strictly_unmovable(flags))
660 #ifdef CONFIG_NUMA_BALANCING
662 * This is used to mark a range of virtual addresses to be inaccessible.
663 * These are later cleared by a NUMA hinting fault. Depending on these
664 * faults, pages may be migrated for better NUMA placement.
666 * This is assuming that NUMA faults are handled using PROT_NONE. If
667 * an architecture makes a different choice, it will need further
668 * changes to the core.
670 unsigned long change_prot_numa(struct vm_area_struct *vma,
671 unsigned long addr, unsigned long end)
673 struct mmu_gather tlb;
676 tlb_gather_mmu(&tlb, vma->vm_mm);
678 nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
680 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
682 tlb_finish_mmu(&tlb);
686 #endif /* CONFIG_NUMA_BALANCING */
688 static int queue_pages_test_walk(unsigned long start, unsigned long end,
689 struct mm_walk *walk)
691 struct vm_area_struct *next, *vma = walk->vma;
692 struct queue_pages *qp = walk->private;
693 unsigned long flags = qp->flags;
695 /* range check first */
696 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
700 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
701 (qp->start < vma->vm_start))
702 /* hole at head side of range */
705 next = find_vma(vma->vm_mm, vma->vm_end);
706 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
707 ((vma->vm_end < qp->end) &&
708 (!next || vma->vm_end < next->vm_start)))
709 /* hole at middle or tail of range */
713 * Need to check MPOL_MF_STRICT to return -EIO if possible
714 * regardless of vma_migratable
716 if (!vma_migratable(vma) &&
717 !(flags & MPOL_MF_STRICT))
721 * Check page nodes, and queue pages to move, in the current vma.
722 * But if no moving, and no strict checking, the scan can be skipped.
724 if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
729 static const struct mm_walk_ops queue_pages_walk_ops = {
730 .hugetlb_entry = queue_folios_hugetlb,
731 .pmd_entry = queue_folios_pte_range,
732 .test_walk = queue_pages_test_walk,
733 .walk_lock = PGWALK_RDLOCK,
736 static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
737 .hugetlb_entry = queue_folios_hugetlb,
738 .pmd_entry = queue_folios_pte_range,
739 .test_walk = queue_pages_test_walk,
740 .walk_lock = PGWALK_WRLOCK,
744 * Walk through page tables and collect pages to be migrated.
746 * If pages found in a given range are not on the required set of @nodes,
747 * and migration is allowed, they are isolated and queued to @pagelist.
749 * queue_pages_range() may return:
750 * 0 - all pages already on the right node, or successfully queued for moving
751 * (or neither strict checking nor moving requested: only range checking).
752 * >0 - this number of misplaced folios could not be queued for moving
753 * (a hugetlbfs page or a transparent huge page being counted as 1).
754 * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
755 * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
758 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
759 nodemask_t *nodes, unsigned long flags,
760 struct list_head *pagelist)
763 struct queue_pages qp = {
764 .pagelist = pagelist,
771 const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
772 &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
774 err = walk_page_range(mm, start, end, ops, &qp);
777 /* whole range in hole */
780 return err ? : qp.nr_failed;
784 * Apply policy to a single VMA
785 * This must be called with the mmap_lock held for writing.
787 static int vma_replace_policy(struct vm_area_struct *vma,
788 struct mempolicy *pol)
791 struct mempolicy *old;
792 struct mempolicy *new;
794 vma_assert_write_locked(vma);
800 if (vma->vm_ops && vma->vm_ops->set_policy) {
801 err = vma->vm_ops->set_policy(vma, new);
806 old = vma->vm_policy;
807 vma->vm_policy = new; /* protected by mmap_lock */
816 /* Split or merge the VMA (if required) and apply the new policy */
817 static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
818 struct vm_area_struct **prev, unsigned long start,
819 unsigned long end, struct mempolicy *new_pol)
821 unsigned long vmstart, vmend;
823 vmend = min(end, vma->vm_end);
824 if (start > vma->vm_start) {
828 vmstart = vma->vm_start;
831 if (mpol_equal(vma->vm_policy, new_pol)) {
836 vma = vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol);
841 return vma_replace_policy(vma, new_pol);
844 /* Set the process memory policy */
845 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
848 struct mempolicy *new, *old;
849 NODEMASK_SCRATCH(scratch);
855 new = mpol_new(mode, flags, nodes);
862 ret = mpol_set_nodemask(new, nodes, scratch);
864 task_unlock(current);
869 old = current->mempolicy;
870 current->mempolicy = new;
871 if (new && (new->mode == MPOL_INTERLEAVE ||
872 new->mode == MPOL_WEIGHTED_INTERLEAVE)) {
873 current->il_prev = MAX_NUMNODES-1;
874 current->il_weight = 0;
876 task_unlock(current);
880 NODEMASK_SCRATCH_FREE(scratch);
885 * Return nodemask for policy for get_mempolicy() query
887 * Called with task's alloc_lock held
889 static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
892 if (pol == &default_policy)
897 case MPOL_INTERLEAVE:
899 case MPOL_PREFERRED_MANY:
900 case MPOL_WEIGHTED_INTERLEAVE:
904 /* return empty node mask for local allocation */
911 static int lookup_node(struct mm_struct *mm, unsigned long addr)
913 struct page *p = NULL;
916 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
918 ret = page_to_nid(p);
924 /* Retrieve NUMA policy */
925 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
926 unsigned long addr, unsigned long flags)
929 struct mm_struct *mm = current->mm;
930 struct vm_area_struct *vma = NULL;
931 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
934 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
937 if (flags & MPOL_F_MEMS_ALLOWED) {
938 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
940 *policy = 0; /* just so it's initialized */
942 *nmask = cpuset_current_mems_allowed;
943 task_unlock(current);
947 if (flags & MPOL_F_ADDR) {
948 pgoff_t ilx; /* ignored here */
950 * Do NOT fall back to task policy if the
951 * vma/shared policy at addr is NULL. We
952 * want to return MPOL_DEFAULT in this case.
955 vma = vma_lookup(mm, addr);
957 mmap_read_unlock(mm);
960 pol = __get_vma_policy(vma, addr, &ilx);
965 pol = &default_policy; /* indicates default behavior */
967 if (flags & MPOL_F_NODE) {
968 if (flags & MPOL_F_ADDR) {
970 * Take a refcount on the mpol, because we are about to
971 * drop the mmap_lock, after which only "pol" remains
972 * valid, "vma" is stale.
977 mmap_read_unlock(mm);
978 err = lookup_node(mm, addr);
982 } else if (pol == current->mempolicy &&
983 pol->mode == MPOL_INTERLEAVE) {
984 *policy = next_node_in(current->il_prev, pol->nodes);
985 } else if (pol == current->mempolicy &&
986 pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
987 if (current->il_weight)
988 *policy = current->il_prev;
990 *policy = next_node_in(current->il_prev,
997 *policy = pol == &default_policy ? MPOL_DEFAULT :
1000 * Internal mempolicy flags must be masked off before exposing
1001 * the policy to userspace.
1003 *policy |= (pol->flags & MPOL_MODE_FLAGS);
1008 if (mpol_store_user_nodemask(pol)) {
1009 *nmask = pol->w.user_nodemask;
1012 get_policy_nodemask(pol, nmask);
1013 task_unlock(current);
1020 mmap_read_unlock(mm);
1022 mpol_put(pol_refcount);
1026 #ifdef CONFIG_MIGRATION
1027 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1028 unsigned long flags)
1031 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
1032 * Choosing not to migrate a shared folio is not counted as a failure.
1034 * See folio_likely_mapped_shared() on possible imprecision when we
1035 * cannot easily detect if a folio is shared.
1037 if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) {
1038 if (folio_isolate_lru(folio)) {
1039 list_add_tail(&folio->lru, foliolist);
1040 node_stat_mod_folio(folio,
1041 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1042 folio_nr_pages(folio));
1045 * Non-movable folio may reach here. And, there may be
1046 * temporary off LRU folios or non-LRU movable folios.
1047 * Treat them as unmovable folios since they can't be
1048 * isolated, so they can't be moved at the moment.
1057 * Migrate pages from one node to a target node.
1058 * Returns error or the number of pages not migrated.
1060 static long migrate_to_node(struct mm_struct *mm, int source, int dest,
1064 struct vm_area_struct *vma;
1065 LIST_HEAD(pagelist);
1068 struct migration_target_control mtc = {
1070 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1071 .reason = MR_SYSCALL,
1075 node_set(source, nmask);
1077 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1080 vma = find_vma(mm, 0);
1083 * This does not migrate the range, but isolates all pages that
1084 * need migration. Between passing in the full user address
1085 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
1086 * but passes back the count of pages which could not be isolated.
1088 nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1089 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1090 mmap_read_unlock(mm);
1092 if (!list_empty(&pagelist)) {
1093 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1094 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1096 putback_movable_pages(&pagelist);
1105 * Move pages between the two nodesets so as to preserve the physical
1106 * layout as much as possible.
1108 * Returns the number of pages that could not be moved.
1110 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1111 const nodemask_t *to, int flags)
1117 lru_cache_disable();
1120 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1121 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1122 * bit in 'tmp', and return that <source, dest> pair for migration.
1123 * The pair of nodemasks 'to' and 'from' define the map.
1125 * If no pair of bits is found that way, fallback to picking some
1126 * pair of 'source' and 'dest' bits that are not the same. If the
1127 * 'source' and 'dest' bits are the same, this represents a node
1128 * that will be migrating to itself, so no pages need move.
1130 * If no bits are left in 'tmp', or if all remaining bits left
1131 * in 'tmp' correspond to the same bit in 'to', return false
1132 * (nothing left to migrate).
1134 * This lets us pick a pair of nodes to migrate between, such that
1135 * if possible the dest node is not already occupied by some other
1136 * source node, minimizing the risk of overloading the memory on a
1137 * node that would happen if we migrated incoming memory to a node
1138 * before migrating outgoing memory sourced from that same node.
1140 * A single scan of tmp is sufficient. As we go, we remember the
1141 * most recent <s, d> pair that moved (s != d). If we find a pair
1142 * that not only moved, but what's better, moved to an empty slot
1143 * (d is not set in tmp), then we break out with that pair.
1144 * Otherwise when we finish scanning tmp, we at least have the
1145 * most recent <s, d> pair that moved. If we get all the way through
1146 * the scan of tmp without finding any node that moved, much less
1147 * moved to an empty node, then there is nothing left worth migrating.
1151 while (!nodes_empty(tmp)) {
1153 int source = NUMA_NO_NODE;
1156 for_each_node_mask(s, tmp) {
1159 * do_migrate_pages() tries to maintain the relative
1160 * node relationship of the pages established between
1161 * threads and memory areas.
1163 * However if the number of source nodes is not equal to
1164 * the number of destination nodes we can not preserve
1165 * this node relative relationship. In that case, skip
1166 * copying memory from a node that is in the destination mask.
1169 * Example: [2,3,4] -> [3,4,5] moves everything.
1170 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1173 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1174 (node_isset(s, *to)))
1177 d = node_remap(s, *from, *to);
1181 source = s; /* Node moved. Memorize */
1184 /* dest not in remaining from nodes? */
1185 if (!node_isset(dest, tmp))
1188 if (source == NUMA_NO_NODE)
1191 node_clear(source, tmp);
1192 err = migrate_to_node(mm, source, dest, flags);
1202 return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
1206 * Allocate a new folio for page migration, according to NUMA mempolicy.
1208 static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1209 unsigned long private)
1211 struct migration_mpol *mmpol = (struct migration_mpol *)private;
1212 struct mempolicy *pol = mmpol->pol;
1213 pgoff_t ilx = mmpol->ilx;
1215 int nid = numa_node_id();
1218 order = folio_order(src);
1219 ilx += src->index >> order;
1221 if (folio_test_hugetlb(src)) {
1222 nodemask_t *nodemask;
1225 h = folio_hstate(src);
1226 gfp = htlb_alloc_mask(h);
1227 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
1228 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp,
1229 htlb_allow_alloc_fallback(MR_MEMPOLICY_MBIND));
1232 if (folio_test_large(src))
1233 gfp = GFP_TRANSHUGE;
1235 gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
1237 return folio_alloc_mpol(gfp, order, pol, ilx, nid);
1241 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1242 unsigned long flags)
1247 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1248 const nodemask_t *to, int flags)
1253 static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1254 unsigned long private)
1260 static long do_mbind(unsigned long start, unsigned long len,
1261 unsigned short mode, unsigned short mode_flags,
1262 nodemask_t *nmask, unsigned long flags)
1264 struct mm_struct *mm = current->mm;
1265 struct vm_area_struct *vma, *prev;
1266 struct vma_iterator vmi;
1267 struct migration_mpol mmpol;
1268 struct mempolicy *new;
1272 LIST_HEAD(pagelist);
1274 if (flags & ~(unsigned long)MPOL_MF_VALID)
1276 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1279 if (start & ~PAGE_MASK)
1282 if (mode == MPOL_DEFAULT)
1283 flags &= ~MPOL_MF_STRICT;
1285 len = PAGE_ALIGN(len);
1293 new = mpol_new(mode, mode_flags, nmask);
1295 return PTR_ERR(new);
1298 * If we are using the default policy then operation
1299 * on discontinuous address spaces is okay after all
1302 flags |= MPOL_MF_DISCONTIG_OK;
1304 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1305 lru_cache_disable();
1307 NODEMASK_SCRATCH(scratch);
1309 mmap_write_lock(mm);
1310 err = mpol_set_nodemask(new, nmask, scratch);
1312 mmap_write_unlock(mm);
1315 NODEMASK_SCRATCH_FREE(scratch);
1321 * Lock the VMAs before scanning for pages to migrate,
1322 * to ensure we don't miss a concurrently inserted page.
1324 nr_failed = queue_pages_range(mm, start, end, nmask,
1325 flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);
1327 if (nr_failed < 0) {
1331 vma_iter_init(&vmi, mm, start);
1332 prev = vma_prev(&vmi);
1333 for_each_vma_range(vmi, vma, end) {
1334 err = mbind_range(&vmi, vma, &prev, start, end, new);
1340 if (!err && !list_empty(&pagelist)) {
1341 /* Convert MPOL_DEFAULT's NULL to task or default policy */
1343 new = get_task_policy(current);
1350 * In the interleaved case, attempt to allocate on exactly the
1351 * targeted nodes, for the first VMA to be migrated; for later
1352 * VMAs, the nodes will still be interleaved from the targeted
1353 * nodemask, but one by one may be selected differently.
1355 if (new->mode == MPOL_INTERLEAVE ||
1356 new->mode == MPOL_WEIGHTED_INTERLEAVE) {
1357 struct folio *folio;
1359 unsigned long addr = -EFAULT;
1361 list_for_each_entry(folio, &pagelist, lru) {
1362 if (!folio_test_ksm(folio))
1365 if (!list_entry_is_head(folio, &pagelist, lru)) {
1366 vma_iter_init(&vmi, mm, start);
1367 for_each_vma_range(vmi, vma, end) {
1368 addr = page_address_in_vma(
1369 folio_page(folio, 0), vma);
1370 if (addr != -EFAULT)
1374 if (addr != -EFAULT) {
1375 order = folio_order(folio);
1376 /* We already know the pol, but not the ilx */
1377 mpol_cond_put(get_vma_policy(vma, addr, order,
1379 /* Set base from which to increment by index */
1380 mmpol.ilx -= folio->index >> order;
1385 mmap_write_unlock(mm);
1387 if (!err && !list_empty(&pagelist)) {
1388 nr_failed |= migrate_pages(&pagelist,
1389 alloc_migration_target_by_mpol, NULL,
1390 (unsigned long)&mmpol, MIGRATE_SYNC,
1391 MR_MEMPOLICY_MBIND, NULL);
1394 if (nr_failed && (flags & MPOL_MF_STRICT))
1396 if (!list_empty(&pagelist))
1397 putback_movable_pages(&pagelist);
1400 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1406 * User space interface with variable sized bitmaps for nodelists.
1408 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1409 unsigned long maxnode)
1411 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1414 if (in_compat_syscall())
1415 ret = compat_get_bitmap(mask,
1416 (const compat_ulong_t __user *)nmask,
1419 ret = copy_from_user(mask, nmask,
1420 nlongs * sizeof(unsigned long));
1425 if (maxnode % BITS_PER_LONG)
1426 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1431 /* Copy a node mask from user space. */
1432 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1433 unsigned long maxnode)
1436 nodes_clear(*nodes);
1437 if (maxnode == 0 || !nmask)
1439 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1443 * When the user specifies more nodes than supported, just check
1444 * if the unsupported part is all zero, one word at a time,
1445 * starting at the end.
1447 while (maxnode > MAX_NUMNODES) {
1448 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1451 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1454 if (maxnode - bits >= MAX_NUMNODES) {
1457 maxnode = MAX_NUMNODES;
1458 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1464 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1467 /* Copy a kernel node mask to user space */
1468 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1471 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1472 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1473 bool compat = in_compat_syscall();
1476 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1478 if (copy > nbytes) {
1479 if (copy > PAGE_SIZE)
1481 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1484 maxnode = nr_node_ids;
1488 return compat_put_bitmap((compat_ulong_t __user *)mask,
1489 nodes_addr(*nodes), maxnode);
1491 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1494 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1495 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1497 *flags = *mode & MPOL_MODE_FLAGS;
1498 *mode &= ~MPOL_MODE_FLAGS;
1500 if ((unsigned int)(*mode) >= MPOL_MAX)
1502 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1504 if (*flags & MPOL_F_NUMA_BALANCING) {
1505 if (*mode == MPOL_BIND || *mode == MPOL_PREFERRED_MANY)
1506 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
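/*
 * Editorial addition: hedged userspace illustration of how mode flags arrive
 * packed into the mode argument that sanitize_mpol_flags() splits apart.
 * Constants come from the uapi mempolicy header / libnuma's <numaif.h>;
 * the nodemask value is an assumption.
 */
#if 0	/* illustration only */
	unsigned long nodes = 0x5;	/* nodes 0 and 2 */

	/* MPOL_F_STATIC_NODES: do not remap the nodemask on cpuset changes */
	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
		      &nodes, sizeof(nodes) * 8);
#endif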
1513 static long kernel_mbind(unsigned long start, unsigned long len,
1514 unsigned long mode, const unsigned long __user *nmask,
1515 unsigned long maxnode, unsigned int flags)
1517 unsigned short mode_flags;
1522 start = untagged_addr(start);
1523 err = sanitize_mpol_flags(&lmode, &mode_flags);
1527 err = get_nodes(&nodes, nmask, maxnode);
1531 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1534 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1535 unsigned long, home_node, unsigned long, flags)
1537 struct mm_struct *mm = current->mm;
1538 struct vm_area_struct *vma, *prev;
1539 struct mempolicy *new, *old;
1542 VMA_ITERATOR(vmi, mm, start);
1544 start = untagged_addr(start);
1545 if (start & ~PAGE_MASK)
1548 * flags is reserved for future extensions.
1554 * Check home_node is online to avoid accessing uninitialized NODE_DATA.
1557 if (home_node >= MAX_NUMNODES || !node_online(home_node))
1560 len = PAGE_ALIGN(len);
1567 mmap_write_lock(mm);
1568 prev = vma_prev(&vmi);
1569 for_each_vma_range(vmi, vma, end) {
1571 * If any vma in the range has a policy other than MPOL_BIND
1572 * or MPOL_PREFERRED_MANY, we return an error. We don't reset
1573 * the home node for vmas we already updated before.
1575 old = vma_policy(vma);
1580 if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1584 new = mpol_dup(old);
1590 vma_start_write(vma);
1591 new->home_node = home_node;
1592 err = mbind_range(&vmi, vma, &prev, start, end, new);
1597 mmap_write_unlock(mm);
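/*
 * Editorial addition: hedged userspace illustration of the syscall above.
 * There is no dedicated libc wrapper at the time of writing, so callers
 * typically go through syscall(2) (needs <unistd.h> and <sys/syscall.h>);
 * buf, len and the node number are assumptions.
 */
#if 0	/* illustration only */
	/* Prefer node 1 as the "home node" for an existing MPOL_BIND range */
	syscall(__NR_set_mempolicy_home_node,
		(unsigned long)buf, len, 1UL /* home_node */, 0UL /* flags */);
#endif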
1601 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1602 unsigned long, mode, const unsigned long __user *, nmask,
1603 unsigned long, maxnode, unsigned int, flags)
1605 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1608 /* Set the process memory policy */
1609 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1610 unsigned long maxnode)
1612 unsigned short mode_flags;
1617 err = sanitize_mpol_flags(&lmode, &mode_flags);
1621 err = get_nodes(&nodes, nmask, maxnode);
1625 return do_set_mempolicy(lmode, mode_flags, &nodes);
1628 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1629 unsigned long, maxnode)
1631 return kernel_set_mempolicy(mode, nmask, maxnode);
1634 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1635 const unsigned long __user *old_nodes,
1636 const unsigned long __user *new_nodes)
1638 struct mm_struct *mm = NULL;
1639 struct task_struct *task;
1640 nodemask_t task_nodes;
1644 NODEMASK_SCRATCH(scratch);
1649 old = &scratch->mask1;
1650 new = &scratch->mask2;
1652 err = get_nodes(old, old_nodes, maxnode);
1656 err = get_nodes(new, new_nodes, maxnode);
1660 /* Find the mm_struct */
1662 task = pid ? find_task_by_vpid(pid) : current;
1668 get_task_struct(task);
1673 * Check if this process has the right to modify the specified process.
1674 * Use the regular "ptrace_may_access()" checks.
1676 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1683 task_nodes = cpuset_mems_allowed(task);
1684 /* Is the user allowed to access the target nodes? */
1685 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1690 task_nodes = cpuset_mems_allowed(current);
1691 nodes_and(*new, *new, task_nodes);
1692 if (nodes_empty(*new))
1695 err = security_task_movememory(task);
1699 mm = get_task_mm(task);
1700 put_task_struct(task);
1707 err = do_migrate_pages(mm, old, new,
1708 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1712 NODEMASK_SCRATCH_FREE(scratch);
1717 put_task_struct(task);
1721 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1722 const unsigned long __user *, old_nodes,
1723 const unsigned long __user *, new_nodes)
1725 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
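/*
 * Editorial addition: hedged userspace illustration of migrate_pages(2) via
 * libnuma's <numaif.h> wrapper; pid and the node numbers are assumptions.
 */
#if 0	/* illustration only */
	unsigned long from_nodes = 1UL << 0;	/* node 0 */
	unsigned long to_nodes   = 1UL << 1;	/* node 1 */

	/* try to move all of @pid's pages from node 0 to node 1 */
	migrate_pages(pid, sizeof(from_nodes) * 8, &from_nodes, &to_nodes);
#endif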
1728 /* Retrieve NUMA policy */
1729 static int kernel_get_mempolicy(int __user *policy,
1730 unsigned long __user *nmask,
1731 unsigned long maxnode,
1733 unsigned long flags)
1739 if (nmask != NULL && maxnode < nr_node_ids)
1742 addr = untagged_addr(addr);
1744 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1749 if (policy && put_user(pval, policy))
1753 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1758 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1759 unsigned long __user *, nmask, unsigned long, maxnode,
1760 unsigned long, addr, unsigned long, flags)
1762 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
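/*
 * Editorial addition: hedged userspace illustration of the MPOL_F_NODE |
 * MPOL_F_ADDR query handled above: it reports which node currently backs
 * the page at a given address. addr is an assumption; the wrapper comes
 * from libnuma's <numaif.h>.
 */
#if 0	/* illustration only */
	int node = -1;

	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
	/* on success, node now holds the id of the node backing addr's page */
#endif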
1765 bool vma_migratable(struct vm_area_struct *vma)
1767 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1771 * DAX device mappings require predictable access latency, so avoid
1772 * incurring periodic faults.
1774 if (vma_is_dax(vma))
1777 if (is_vm_hugetlb_page(vma) &&
1778 !hugepage_migration_supported(hstate_vma(vma)))
1782 * Migration allocates pages in the highest zone. If we cannot
1783 * do so then migration (at least from node to node) is not possible.
1787 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1793 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1794 unsigned long addr, pgoff_t *ilx)
1797 return (vma->vm_ops && vma->vm_ops->get_policy) ?
1798 vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy;
1802 * get_vma_policy(@vma, @addr, @order, @ilx)
1803 * @vma: virtual memory area whose policy is sought
1804 * @addr: address in @vma for shared policy lookup
1805 * @order: 0, or appropriate huge_page_order for interleaving
1806 * @ilx: interleave index (output), for use only when MPOL_INTERLEAVE or
1807 * MPOL_WEIGHTED_INTERLEAVE
1809 * Returns effective policy for a VMA at specified address.
1810 * Falls back to current->mempolicy or system default policy, as necessary.
1811 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1812 * count--added by the get_policy() vm_op, as appropriate--to protect against
1813 * freeing by another task. It is the caller's responsibility to free the
1814 * extra reference for shared policies.
1816 struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1817 unsigned long addr, int order, pgoff_t *ilx)
1819 struct mempolicy *pol;
1821 pol = __get_vma_policy(vma, addr, ilx);
1823 pol = get_task_policy(current);
1824 if (pol->mode == MPOL_INTERLEAVE ||
1825 pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
1826 *ilx += vma->vm_pgoff >> order;
1827 *ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
1832 bool vma_policy_mof(struct vm_area_struct *vma)
1834 struct mempolicy *pol;
1836 if (vma->vm_ops && vma->vm_ops->get_policy) {
1838 pgoff_t ilx; /* ignored here */
1840 pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
1841 if (pol && (pol->flags & MPOL_F_MOF))
1848 pol = vma->vm_policy;
1850 pol = get_task_policy(current);
1852 return pol->flags & MPOL_F_MOF;
1855 bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1857 enum zone_type dynamic_policy_zone = policy_zone;
1859 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1862 * if policy->nodes has movable memory only,
1863 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1865 * policy->nodes is intersected with node_states[N_MEMORY],
1866 * so if the following test fails, it implies
1867 * policy->nodes has movable memory only.
1869 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1870 dynamic_policy_zone = ZONE_MOVABLE;
1872 return zone >= dynamic_policy_zone;
1875 static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
1878 unsigned int cpuset_mems_cookie;
1881 /* to prevent miscount use tsk->mems_allowed_seq to detect rebind */
1882 cpuset_mems_cookie = read_mems_allowed_begin();
1883 node = current->il_prev;
1884 if (!current->il_weight || !node_isset(node, policy->nodes)) {
1885 node = next_node_in(node, policy->nodes);
1886 if (read_mems_allowed_retry(cpuset_mems_cookie))
1888 if (node == MAX_NUMNODES)
1890 current->il_prev = node;
1891 current->il_weight = get_il_weight(node);
1893 current->il_weight--;
1897 /* Do dynamic interleaving for a process */
1898 static unsigned int interleave_nodes(struct mempolicy *policy)
1901 unsigned int cpuset_mems_cookie;
1903 /* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
1905 cpuset_mems_cookie = read_mems_allowed_begin();
1906 nid = next_node_in(current->il_prev, policy->nodes);
1907 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1909 if (nid < MAX_NUMNODES)
1910 current->il_prev = nid;
1915 * Depending on the memory policy, provide a node from which to allocate the next slab entry.
1918 unsigned int mempolicy_slab_node(void)
1920 struct mempolicy *policy;
1921 int node = numa_mem_id();
1926 policy = current->mempolicy;
1930 switch (policy->mode) {
1931 case MPOL_PREFERRED:
1932 return first_node(policy->nodes);
1934 case MPOL_INTERLEAVE:
1935 return interleave_nodes(policy);
1937 case MPOL_WEIGHTED_INTERLEAVE:
1938 return weighted_interleave_nodes(policy);
1941 case MPOL_PREFERRED_MANY:
1946 * Follow bind policy behavior and start allocation at the first node.
1949 struct zonelist *zonelist;
1950 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1951 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1952 z = first_zones_zonelist(zonelist, highest_zoneidx,
1954 return z->zone ? zone_to_nid(z->zone) : node;
1964 static unsigned int read_once_policy_nodemask(struct mempolicy *pol,
1968 * barrier stabilizes the nodemask locally so that it can be iterated
1969 * over safely without concern for changes. Allocators validate node
1970 * selection does not violate mems_allowed, so this is safe.
1973 memcpy(mask, &pol->nodes, sizeof(nodemask_t));
1975 return nodes_weight(*mask);
1978 static unsigned int weighted_interleave_nid(struct mempolicy *pol, pgoff_t ilx)
1980 nodemask_t nodemask;
1981 unsigned int target, nr_nodes;
1983 unsigned int weight_total = 0;
1987 nr_nodes = read_once_policy_nodemask(pol, &nodemask);
1989 return numa_node_id();
1992 table = rcu_dereference(iw_table);
1993 /* calculate the total weight */
1994 for_each_node_mask(nid, nodemask) {
1995 /* detect system default usage */
1996 weight = table ? table[nid] : 1;
1997 weight = weight ? weight : 1;
1998 weight_total += weight;
2001 /* Calculate the node offset based on totals */
2002 target = ilx % weight_total;
2003 nid = first_node(nodemask);
2005 /* detect system default usage */
2006 weight = table ? table[nid] : 1;
2007 weight = weight ? weight : 1;
2008 if (target < weight)
2011 nid = next_node_in(nid, nodemask);
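/*
 * Editorial example: with pol->nodes = {0,1} and weights {2,1},
 * weight_total is 3, so successive interleave indices map as
 * ilx 0,1 -> node 0; ilx 2 -> node 1; ilx 3,4 -> node 0; ilx 5 -> node 1,
 * i.e. two pages on node 0 for every page on node 1.
 */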
2018 * Do static interleaving for interleave index @ilx. Returns the ilx'th
2019 * node in pol->nodes (starting from ilx=0), wrapping around if ilx
2020 * exceeds the number of present nodes.
2022 static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
2024 nodemask_t nodemask;
2025 unsigned int target, nnodes;
2029 nnodes = read_once_policy_nodemask(pol, &nodemask);
2031 return numa_node_id();
2032 target = ilx % nnodes;
2033 nid = first_node(nodemask);
2034 for (i = 0; i < target; i++)
2035 nid = next_node(nid, nodemask);
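/*
 * Editorial example: with pol->nodes = {0,2,5} and ilx = 7, nnodes is 3 and
 * target is 7 % 3 = 1, so the walk stops on the second set node: node 2.
 */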
2040 * Return a nodemask representing a mempolicy for filtering nodes for
2041 * page allocation, together with preferred node id (or the input node id).
2043 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
2044 pgoff_t ilx, int *nid)
2046 nodemask_t *nodemask = NULL;
2048 switch (pol->mode) {
2049 case MPOL_PREFERRED:
2050 /* Override input node id */
2051 *nid = first_node(pol->nodes);
2053 case MPOL_PREFERRED_MANY:
2054 nodemask = &pol->nodes;
2055 if (pol->home_node != NUMA_NO_NODE)
2056 *nid = pol->home_node;
2059 /* Restrict to nodemask (but not on lower zones) */
2060 if (apply_policy_zone(pol, gfp_zone(gfp)) &&
2061 cpuset_nodemask_valid_mems_allowed(&pol->nodes))
2062 nodemask = &pol->nodes;
2063 if (pol->home_node != NUMA_NO_NODE)
2064 *nid = pol->home_node;
2066 * __GFP_THISNODE shouldn't even be used with the bind policy
2067 * because we might easily break the expectation to stay on the
2068 * requested node and not break the policy.
2070 WARN_ON_ONCE(gfp & __GFP_THISNODE);
2072 case MPOL_INTERLEAVE:
2073 /* Override input node id */
2074 *nid = (ilx == NO_INTERLEAVE_INDEX) ?
2075 interleave_nodes(pol) : interleave_nid(pol, ilx);
2077 case MPOL_WEIGHTED_INTERLEAVE:
2078 *nid = (ilx == NO_INTERLEAVE_INDEX) ?
2079 weighted_interleave_nodes(pol) :
2080 weighted_interleave_nid(pol, ilx);
2087 #ifdef CONFIG_HUGETLBFS
2089 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2090 * @vma: virtual memory area whose policy is sought
2091 * @addr: address in @vma for shared policy lookup and interleave policy
2092 * @gfp_flags: for requested zone
2093 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2094 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2096 * Returns a nid suitable for a huge page allocation and a pointer
2097 * to the struct mempolicy for conditional unref after allocation.
2098 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2099 * to the mempolicy's @nodemask for filtering the zonelist.
2101 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2102 struct mempolicy **mpol, nodemask_t **nodemask)
2107 nid = numa_node_id();
2108 *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
2109 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
2114 * init_nodemask_of_mempolicy
2116 * If the current task's mempolicy is "default" [NULL], return 'false'
2117 * to indicate default policy. Otherwise, extract the policy nodemask
2118 * for 'bind' or 'interleave' policy into the argument nodemask, or
2119 * initialize the argument nodemask to contain the single node for
2120 * 'preferred' or 'local' policy and return 'true' to indicate presence
2121 * of non-default mempolicy.
2123 * We don't bother with reference counting the mempolicy [mpol_get/put]
2124 * because the current task is examining its own mempolicy and a task's
2125 * mempolicy is only ever changed by the task itself.
2127 * N.B., it is the caller's responsibility to free a returned nodemask.
2129 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2131 struct mempolicy *mempolicy;
2133 if (!(mask && current->mempolicy))
2137 mempolicy = current->mempolicy;
2138 switch (mempolicy->mode) {
2139 case MPOL_PREFERRED:
2140 case MPOL_PREFERRED_MANY:
2142 case MPOL_INTERLEAVE:
2143 case MPOL_WEIGHTED_INTERLEAVE:
2144 *mask = mempolicy->nodes;
2148 init_nodemask_of_node(mask, numa_node_id());
2154 task_unlock(current);
2161 * mempolicy_in_oom_domain
2163 * If tsk's mempolicy is "bind", check for intersection between mask and
2164 * the policy nodemask. Otherwise, return true for all other policies
2165 * including "interleave", as a tsk with "interleave" policy may have
2166 * memory allocated from all nodes in the system.
2168 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2170 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2171 const nodemask_t *mask)
2173 struct mempolicy *mempolicy;
2180 mempolicy = tsk->mempolicy;
2181 if (mempolicy && mempolicy->mode == MPOL_BIND)
2182 ret = nodes_intersects(mempolicy->nodes, *mask);
2188 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2189 int nid, nodemask_t *nodemask)
2192 gfp_t preferred_gfp;
2195 * This is a two pass approach. The first pass will only try the
2196 * preferred nodes but skip the direct reclaim and allow the
2197 * allocation to fail, while the second pass will try all the nodes in the system.
2200 preferred_gfp = gfp | __GFP_NOWARN;
2201 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2202 page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
2204 page = __alloc_pages_noprof(gfp, order, nid, NULL);
2210 * alloc_pages_mpol - Allocate pages according to NUMA mempolicy.
2212 * @order: Order of the page allocation.
2213 * @pol: Pointer to the NUMA mempolicy.
2214 * @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()).
2215 * @nid: Preferred node (usually numa_node_id() but @mpol may override it).
2217 * Return: The page on success or NULL if allocation fails.
2219 struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
2220 struct mempolicy *pol, pgoff_t ilx, int nid)
2222 nodemask_t *nodemask;
2225 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
2227 if (pol->mode == MPOL_PREFERRED_MANY)
2228 return alloc_pages_preferred_many(gfp, order, nid, nodemask);
2230 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2231 /* filter "hugepage" allocation, unless from alloc_pages() */
2232 order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
2234 * For hugepage allocation and non-interleave policy which
2235 * allows the current node (or other explicitly preferred
2236 * node) we only try to allocate from the current/preferred
2237 * node and don't fall back to other nodes, as the cost of
2238 * remote accesses would likely offset THP benefits.
2240 * If the policy is interleave or does not allow the current
2241 * node in its nodemask, we allocate the standard way.
2243 if (pol->mode != MPOL_INTERLEAVE &&
2244 pol->mode != MPOL_WEIGHTED_INTERLEAVE &&
2245 (!nodemask || node_isset(nid, *nodemask))) {
2247 * First, try to allocate THP only on local node, but
2248 * don't reclaim unnecessarily, just compact.
2250 page = __alloc_pages_node_noprof(nid,
2251 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2252 if (page || !(gfp & __GFP_DIRECT_RECLAIM))
2255 * If hugepage allocations are configured to always
2256 * synchronous compact or the vma has been madvised
2257 * to prefer hugepage backing, retry allowing remote
2258 * memory with both reclaim and compact as well.
2263 page = __alloc_pages_noprof(gfp, order, nid, nodemask);
2265 if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
2266 /* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
2267 if (static_branch_likely(&vm_numa_stat_key) &&
2268 page_to_nid(page) == nid) {
2270 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2278 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
2279 struct mempolicy *pol, pgoff_t ilx, int nid)
2281 return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP,
2282 order, pol, ilx, nid));
2286 * vma_alloc_folio - Allocate a folio for a VMA.
2288 * @order: Order of the folio.
2289 * @vma: Pointer to VMA.
2290 * @addr: Virtual address of the allocation. Must be inside @vma.
2291 * @hugepage: Unused (was: For hugepages try only preferred node if possible).
2293 * Allocate a folio for a specific address in @vma, using the appropriate
2294 * NUMA policy. The caller must hold the mmap_lock of the mm_struct of the
2295 * VMA to prevent it from going away. Should be used for all allocations
2296 * for folios that will be mapped into user space, excepting hugetlbfs, and
2297 * excepting where direct use of alloc_pages_mpol() is more appropriate.
2299 * Return: The folio on success or NULL if allocation fails.
2301 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
2302 unsigned long addr, bool hugepage)
2304 struct mempolicy *pol;
2306 struct folio *folio;
2308 if (vma->vm_flags & VM_DROPPABLE)
2309 gfp |= __GFP_NOWARN;
2311 pol = get_vma_policy(vma, addr, order, &ilx);
2312 folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
2316 EXPORT_SYMBOL(vma_alloc_folio_noprof);
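/*
 * Editorial addition: hedged sketch of a typical caller, e.g. a fault
 * handler allocating an order-0 folio placed by the VMA's (or task's)
 * mempolicy. vma and vmf are assumptions for illustration.
 */
#if 0	/* illustration only */
	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
					      vma, vmf->address, false);
#endif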
2319 * alloc_pages - Allocate pages.
2321 * @order: Power of two of number of pages to allocate.
2323 * Allocate 1 << @order contiguous pages. The physical address of the
2324 * first page is naturally aligned (eg an order-3 allocation will be aligned
2325 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2326 * process is honoured when in process context.
2328 * Context: Can be called from any context, providing the appropriate GFP flags are used.
2330 * Return: The page on success or NULL if allocation fails.
2332 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
2334 struct mempolicy *pol = &default_policy;
2337 * No reference counting needed for current->mempolicy
2338 * nor system default_policy
2340 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2341 pol = get_task_policy(current);
2343 return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX,
2346 EXPORT_SYMBOL(alloc_pages_noprof);
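/*
 * Editorial addition: hedged usage sketch of the interface above: an
 * order-2 (four page) allocation that honours the caller's process
 * mempolicy when made from process context.
 */
#if 0	/* illustration only */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (page)
		__free_pages(page, 2);
#endif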
2348 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
2350 return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order));
2352 EXPORT_SYMBOL(folio_alloc_noprof);
2354 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2355 struct mempolicy *pol, unsigned long nr_pages,
2356 struct page **page_array)
2359 unsigned long nr_pages_per_node;
2362 unsigned long nr_allocated;
2363 unsigned long total_allocated = 0;
2365 nodes = nodes_weight(pol->nodes);
2366 nr_pages_per_node = nr_pages / nodes;
2367 delta = nr_pages - nodes * nr_pages_per_node;
2369 for (i = 0; i < nodes; i++) {
2371 nr_allocated = alloc_pages_bulk_noprof(gfp,
2372 interleave_nodes(pol), NULL,
2373 nr_pages_per_node + 1, NULL,
2377 nr_allocated = alloc_pages_bulk_noprof(gfp,
2378 interleave_nodes(pol), NULL,
2379 nr_pages_per_node, NULL, page_array);
2382 page_array += nr_allocated;
2383 total_allocated += nr_allocated;
2386 return total_allocated;
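/*
 * Worked example (illustrative): for nr_pages = 10 spread over 4 interleave
 * nodes, nr_pages_per_node = 2 and delta = 2, so the first two nodes visited
 * by interleave_nodes() receive 3 pages each and the remaining two receive
 * 2 pages each: 3 + 3 + 2 + 2 = 10.
 */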
2389 static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
2390 struct mempolicy *pol, unsigned long nr_pages,
2391 struct page **page_array)
2393 struct task_struct *me = current;
2394 unsigned int cpuset_mems_cookie;
2395 unsigned long total_allocated = 0;
2396 unsigned long nr_allocated = 0;
2397 unsigned long rounds;
2398 unsigned long node_pages, delta;
2399 u8 *table, *weights, weight;
2400 unsigned int weight_total = 0;
2401 unsigned long rem_pages = nr_pages;
2404 int resume_node = MAX_NUMNODES - 1;
2405 u8 resume_weight = 0;
2412 /* read the nodes onto the stack, retry if done during rebind */
2414 cpuset_mems_cookie = read_mems_allowed_begin();
2415 nnodes = read_once_policy_nodemask(pol, &nodes);
2416 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2418 /* if the nodemask has become invalid, we cannot do anything */
2422 /* Continue allocating from most recent node and adjust the nr_pages */
2424 weight = me->il_weight;
2425 if (weight && node_isset(node, nodes)) {
2426 node_pages = min(rem_pages, weight);
2427 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2429 page_array += nr_allocated;
2430 total_allocated += nr_allocated;
2431 /* if that's all the pages, no need to interleave */
2432 if (rem_pages <= weight) {
2433 me->il_weight -= rem_pages;
2434 return total_allocated;
2436 /* Otherwise we adjust remaining pages, continue from there */
2437 rem_pages -= weight;
2439 /* clear active weight in case of an allocation failure */
2443 /* create a local copy of node weights to operate on outside rcu */
2444 weights = kzalloc(nr_node_ids, GFP_KERNEL);
2446 return total_allocated;
2449 table = rcu_dereference(iw_table);
2451 memcpy(weights, table, nr_node_ids);
2454 /* calculate total, detect system default usage */
2455 for_each_node_mask(node, nodes) {
2458 weight_total += weights[node];
2462 * Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls.
2463 * Track which node weighted interleave should resume from.
2465 * if (rounds > 0) and (delta == 0), resume_node will always be
2466 * the node following prev_node and its weight.
2468 rounds = rem_pages / weight_total;
2469 delta = rem_pages % weight_total;
2470 resume_node = next_node_in(prev_node, nodes);
2471 resume_weight = weights[resume_node];
2472 for (i = 0; i < nnodes; i++) {
2473 node = next_node_in(prev_node, nodes);
2474 weight = weights[node];
2475 node_pages = weight * rounds;
2476 /* If a delta exists, add this node's portion of the delta */
2477 if (delta > weight) {
2478 node_pages += weight;
2481 /* when delta is depleted, resume from that node */
2482 node_pages += delta;
2484 resume_weight = weight - delta;
2487 /* node_pages can be 0 if an allocation fails and rounds == 0 */
2490 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2492 page_array += nr_allocated;
2493 total_allocated += nr_allocated;
2494 if (total_allocated == nr_pages)
2498 me->il_prev = resume_node;
2499 me->il_weight = resume_weight;
2501 return total_allocated;
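/*
 * Worked example (illustrative, assuming the scan starts at node 0 and no
 * partially consumed weight is carried over in il_weight): with nodes {0,1}
 * weighted 3:1 (weight_total = 4) and rem_pages = 10, rounds = 2 and
 * delta = 2.  Node 0 receives 3 * 2 + 2 = 8 pages, node 1 receives
 * 1 * 2 = 2 pages, and il_prev/il_weight are set so that the next allocation
 * resumes on node 0 with 1 unit of its weight remaining.
 */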
2504 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2505 struct mempolicy *pol, unsigned long nr_pages,
2506 struct page **page_array)
2508 gfp_t preferred_gfp;
2509 unsigned long nr_allocated = 0;
2511 preferred_gfp = gfp | __GFP_NOWARN;
2512 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2514 nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
2515 nr_pages, NULL, page_array);
2517 if (nr_allocated < nr_pages)
2518 nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
2519 nr_pages - nr_allocated, NULL,
2520 page_array + nr_allocated);
2521 return nr_allocated;
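/*
 * Example (illustrative): if the preferred nodes are short on free memory,
 * the first pass above may return fewer than nr_pages pages because it
 * avoids direct reclaim; the second pass then retries the remainder with
 * the caller's original gfp mask, falling back to any allowed node starting
 * from the local one.
 */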
/* Bulk page allocation and the mempolicy must be considered at the
 * same time in some situations, such as vmalloc.
 *
 * Allocating in bulk can accelerate memory allocation, especially for
 * interleaved allocations.
 */
2530 unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
2531 unsigned long nr_pages, struct page **page_array)
2533 struct mempolicy *pol = &default_policy;
2534 nodemask_t *nodemask;
2537 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2538 pol = get_task_policy(current);
2540 if (pol->mode == MPOL_INTERLEAVE)
2541 return alloc_pages_bulk_array_interleave(gfp, pol,
2542 nr_pages, page_array);
2544 if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
2545 return alloc_pages_bulk_array_weighted_interleave(
2546 gfp, pol, nr_pages, page_array);
2548 if (pol->mode == MPOL_PREFERRED_MANY)
2549 return alloc_pages_bulk_array_preferred_many(gfp,
2550 numa_node_id(), pol, nr_pages, page_array);
2552 nid = numa_node_id();
2553 nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
2554 return alloc_pages_bulk_noprof(gfp, nid, nodemask,
2555 nr_pages, NULL, page_array);
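/*
 * Example (illustrative sketch): a vmalloc-style caller can fill an array of
 * page pointers in one call and let the current task's mempolicy spread the
 * pages over nodes.  Note the return value may be less than nr, so callers
 * must handle partial allocation:
 *
 *	struct page **pages = kvcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *	unsigned long got;
 *
 *	if (pages)
 *		got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr, pages);
 */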
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(src->vm_policy);

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}
/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after its cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do the rebind work for the current task.
 */
2579 /* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}
2603 /* Slow path of a mempolicy comparison */
2604 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2608 if (a->mode != b->mode)
2610 if (a->flags != b->flags)
2612 if (a->home_node != b->home_node)
2614 if (mpol_store_user_nodemask(a))
2615 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2620 case MPOL_INTERLEAVE:
2621 case MPOL_PREFERRED:
2622 case MPOL_PREFERRED_MANY:
2623 case MPOL_WEIGHTED_INTERLEAVE:
2624 return !!nodes_equal(a->nodes, b->nodes);
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a red-black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */
2643 * lookup first element intersecting start-end. Caller holds sp->lock for
2644 * reading or for writing
2646 static struct sp_node *sp_lookup(struct shared_policy *sp,
2647 pgoff_t start, pgoff_t end)
2649 struct rb_node *n = sp->root.rb_node;
2652 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2654 if (start >= p->end)
2656 else if (end <= p->start)
2664 struct sp_node *w = NULL;
2665 struct rb_node *prev = rb_prev(n);
2668 w = rb_entry(prev, struct sp_node, nd);
2669 if (w->end <= start)
2673 return rb_entry(n, struct sp_node, nd);
2677 * Insert a new shared policy into the list. Caller holds sp->lock for
2680 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2682 struct rb_node **p = &sp->root.rb_node;
2683 struct rb_node *parent = NULL;
2688 nd = rb_entry(parent, struct sp_node, nd);
2689 if (new->start < nd->start)
2691 else if (new->end > nd->end)
2692 p = &(*p)->rb_right;
2696 rb_link_node(&new->nd, parent, p);
2697 rb_insert_color(&new->nd, &sp->root);
2700 /* Find shared policy intersecting idx */
2701 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
2704 struct mempolicy *pol = NULL;
2707 if (!sp->root.rb_node)
2709 read_lock(&sp->lock);
2710 sn = sp_lookup(sp, idx, idx+1);
2712 mpol_get(sn->policy);
2715 read_unlock(&sp->lock);
2719 static void sp_free(struct sp_node *n)
2721 mpol_put(n->policy);
2722 kmem_cache_free(sn_cache, n);
2726 * mpol_misplaced - check whether current folio node is valid in policy
2728 * @folio: folio to be checked
2729 * @vmf: structure describing the fault
2730 * @addr: virtual address in @vma for shared policy lookup and interleave policy
2732 * Lookup current policy node id for vma,addr and "compare to" folio's
2733 * node id. Policy determination "mimics" alloc_page_vma().
2734 * Called from fault path where we know the vma and faulting address.
 * Return: NUMA_NO_NODE if the folio is on a node that is valid for this
 * policy, or a suitable node ID to allocate a replacement folio from.
 */
2739 int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
2742 struct mempolicy *pol;
2745 int curnid = folio_nid(folio);
2746 struct vm_area_struct *vma = vmf->vma;
2747 int thiscpu = raw_smp_processor_id();
2748 int thisnid = numa_node_id();
2749 int polnid = NUMA_NO_NODE;
2750 int ret = NUMA_NO_NODE;
2753 * Make sure ptl is held so that we don't preempt and we
2754 * have a stable smp processor id
2756 lockdep_assert_held(vmf->ptl);
2757 pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
2758 if (!(pol->flags & MPOL_F_MOF))
2761 switch (pol->mode) {
2762 case MPOL_INTERLEAVE:
2763 polnid = interleave_nid(pol, ilx);
2766 case MPOL_WEIGHTED_INTERLEAVE:
2767 polnid = weighted_interleave_nid(pol, ilx);
2770 case MPOL_PREFERRED:
2771 if (node_isset(curnid, pol->nodes))
2773 polnid = first_node(pol->nodes);
2777 polnid = numa_node_id();
2781 case MPOL_PREFERRED_MANY:
		/*
		 * Even though MPOL_PREFERRED_MANY can allocate pages outside
		 * the policy nodemask, we don't allow NUMA migration to nodes
		 * outside that nodemask for now.  If demotion to slow memory
		 * is desired, the task may deliberately use an
		 * MPOL_PREFERRED_MANY mask that excludes some DRAM node 'x';
		 * in that case we should not promote pages back to node 'x'
		 * from the slow memory node.
		 */
2791 if (pol->flags & MPOL_F_MORON) {
2793 * Optimize placement among multiple nodes
2794 * via NUMA balancing
2796 if (node_isset(thisnid, pol->nodes))
2802 * use current page if in policy nodemask,
2803 * else select nearest allowed node, if any.
2804 * If no allowed nodes, use current [!misplaced].
2806 if (node_isset(curnid, pol->nodes))
2808 z = first_zones_zonelist(
2809 node_zonelist(thisnid, GFP_HIGHUSER),
2810 gfp_zone(GFP_HIGHUSER),
2812 polnid = zone_to_nid(z->zone);
2819 /* Migrate the folio towards the node whose CPU is referencing it */
2820 if (pol->flags & MPOL_F_MORON) {
2823 if (!should_numa_migrate_memory(current, folio, curnid,
2828 if (curnid != polnid)
2837 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2838 * dropped after task->mempolicy is set to NULL so that any allocation done as
2839 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2842 void mpol_put_task_policy(struct task_struct *task)
2844 struct mempolicy *pol;
2847 pol = task->mempolicy;
2848 task->mempolicy = NULL;
2853 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2855 rb_erase(&n->nd, &sp->root);
2859 static void sp_node_init(struct sp_node *node, unsigned long start,
2860 unsigned long end, struct mempolicy *pol)
2862 node->start = start;
2867 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2868 struct mempolicy *pol)
2871 struct mempolicy *newpol;
2873 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2877 newpol = mpol_dup(pol);
2878 if (IS_ERR(newpol)) {
2879 kmem_cache_free(sn_cache, n);
2882 newpol->flags |= MPOL_F_SHARED;
2883 sp_node_init(n, start, end, newpol);
2888 /* Replace a policy range. */
2889 static int shared_policy_replace(struct shared_policy *sp, pgoff_t start,
2890 pgoff_t end, struct sp_node *new)
2893 struct sp_node *n_new = NULL;
2894 struct mempolicy *mpol_new = NULL;
2898 write_lock(&sp->lock);
2899 n = sp_lookup(sp, start, end);
2900 /* Take care of old policies in the same range. */
2901 while (n && n->start < end) {
2902 struct rb_node *next = rb_next(&n->nd);
2903 if (n->start >= start) {
2909 /* Old policy spanning whole new range. */
2914 *mpol_new = *n->policy;
2915 atomic_set(&mpol_new->refcnt, 1);
2916 sp_node_init(n_new, end, n->end, mpol_new);
2918 sp_insert(sp, n_new);
2927 n = rb_entry(next, struct sp_node, nd);
2931 write_unlock(&sp->lock);
2938 kmem_cache_free(sn_cache, n_new);
2943 write_unlock(&sp->lock);
2945 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2948 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2951 atomic_set(&mpol_new->refcnt, 1);
2956 * mpol_shared_policy_init - initialize shared policy for inode
2957 * @sp: pointer to inode shared policy
2958 * @mpol: struct mempolicy to install
2960 * Install non-NULL @mpol in inode's shared policy rb-tree.
2961 * On entry, the current task has a reference on a non-NULL @mpol.
2962 * This must be released on exit.
 * This is called during get_inode() calls, so we can use GFP_KERNEL.
 */
2965 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2969 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2970 rwlock_init(&sp->lock);
2974 struct mempolicy *npol;
2975 NODEMASK_SCRATCH(scratch);
2980 /* contextualize the tmpfs mount point mempolicy to this file */
2981 npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2983 goto free_scratch; /* no valid nodemask intersection */
2986 ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);
2987 task_unlock(current);
2991 /* alloc node covering entire file; adds ref to file's npol */
2992 sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol);
2996 mpol_put(npol); /* drop initial ref on file's npol */
2998 NODEMASK_SCRATCH_FREE(scratch);
3000 mpol_put(mpol); /* drop our incoming ref on sb mpol */
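/*
 * Example (illustrative sketch of how shmem uses this, see mm/shmem.c):
 * a new tmpfs inode installs the mount's mempolicy into its shared policy
 * tree, and fault paths later look the policy up by file offset:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *	...
 *	pol = mpol_shared_policy_lookup(&info->policy, index);
 */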
3004 int mpol_set_shared_policy(struct shared_policy *sp,
3005 struct vm_area_struct *vma, struct mempolicy *pol)
3008 struct sp_node *new = NULL;
3009 unsigned long sz = vma_pages(vma);
3012 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
3016 err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
3022 /* Free a backing policy store on inode delete. */
3023 void mpol_free_shared_policy(struct shared_policy *sp)
3026 struct rb_node *next;
3028 if (!sp->root.rb_node)
3030 write_lock(&sp->lock);
3031 next = rb_first(&sp->root);
3033 n = rb_entry(next, struct sp_node, nd);
3034 next = rb_next(&n->nd);
3037 write_unlock(&sp->lock);
3040 #ifdef CONFIG_NUMA_BALANCING
3041 static int __initdata numabalancing_override;
3043 static void __init check_numabalancing_enable(void)
3045 bool numabalancing_default = false;
3047 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
3048 numabalancing_default = true;
3050 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
3051 if (numabalancing_override)
3052 set_numabalancing_state(numabalancing_override == 1);
3054 if (num_online_nodes() > 1 && !numabalancing_override) {
3055 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
3056 numabalancing_default ? "Enabling" : "Disabling");
3057 set_numabalancing_state(numabalancing_default);
3061 static int __init setup_numabalancing(char *str)
3067 if (!strcmp(str, "enable")) {
3068 numabalancing_override = 1;
3070 } else if (!strcmp(str, "disable")) {
3071 numabalancing_override = -1;
3076 pr_warn("Unable to parse numa_balancing=\n");
3080 __setup("numa_balancing=", setup_numabalancing);
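/*
 * Example (illustrative): automatic NUMA balancing can be forced on or off
 * at boot with "numa_balancing=enable" or "numa_balancing=disable" on the
 * kernel command line, and toggled at runtime via the sysctl named in the
 * message above:
 *
 *	# sysctl kernel.numa_balancing=1
 */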
3082 static inline void __init check_numabalancing_enable(void)
3085 #endif /* CONFIG_NUMA_BALANCING */
3087 void __init numa_policy_init(void)
3089 nodemask_t interleave_nodes;
3090 unsigned long largest = 0;
3091 int nid, prefer = 0;
3093 policy_cache = kmem_cache_create("numa_policy",
3094 sizeof(struct mempolicy),
3095 0, SLAB_PANIC, NULL);
3097 sn_cache = kmem_cache_create("shared_policy_node",
3098 sizeof(struct sp_node),
3099 0, SLAB_PANIC, NULL);
3101 for_each_node(nid) {
3102 preferred_node_policy[nid] = (struct mempolicy) {
3103 .refcnt = ATOMIC_INIT(1),
3104 .mode = MPOL_PREFERRED,
3105 .flags = MPOL_F_MOF | MPOL_F_MORON,
3106 .nodes = nodemask_of_node(nid),
3111 * Set interleaving policy for system init. Interleaving is only
3112 * enabled across suitably sized nodes (default is >= 16MB), or
3113 * fall back to the largest node if they're all smaller.
3115 nodes_clear(interleave_nodes);
3116 for_each_node_state(nid, N_MEMORY) {
3117 unsigned long total_pages = node_present_pages(nid);
3119 /* Preserve the largest node */
3120 if (largest < total_pages) {
3121 largest = total_pages;
3125 /* Interleave this node? */
3126 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
3127 node_set(nid, interleave_nodes);
3130 /* All too small, use the largest */
3131 if (unlikely(nodes_empty(interleave_nodes)))
3132 node_set(prefer, interleave_nodes);
3134 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
3135 pr_err("%s: interleaving failed\n", __func__);
3137 check_numabalancing_enable();
3140 /* Reset policy of current process to default */
3141 void numa_default_policy(void)
3143 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
3147 * Parse and format mempolicy from/to strings
3149 static const char * const policy_modes[] =
3151 [MPOL_DEFAULT] = "default",
3152 [MPOL_PREFERRED] = "prefer",
3153 [MPOL_BIND] = "bind",
3154 [MPOL_INTERLEAVE] = "interleave",
3155 [MPOL_WEIGHTED_INTERLEAVE] = "weighted interleave",
3156 [MPOL_LOCAL] = "local",
	[MPOL_PREFERRED_MANY]  = "prefer (many)",
};

#ifdef CONFIG_TMPFS
3162 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
3163 * @str: string containing mempolicy to parse
3164 * @mpol: pointer to struct mempolicy pointer, returned on success.
3167 * <mode>[=<flags>][:<nodelist>]
3169 * Return: %0 on success, else %1
3171 int mpol_parse_str(char *str, struct mempolicy **mpol)
3173 struct mempolicy *new = NULL;
3174 unsigned short mode_flags;
3176 char *nodelist = strchr(str, ':');
3177 char *flags = strchr(str, '=');
3181 *flags++ = '\0'; /* terminate mode string */
3184 /* NUL-terminate mode or flags string */
3186 if (nodelist_parse(nodelist, nodes))
3188 if (!nodes_subset(nodes, node_states[N_MEMORY]))
3193 mode = match_string(policy_modes, MPOL_MAX, str);
3198 case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only, although later
		 * we use first_node(nodes) to grab a single node, so here
		 * nodelist (or nodes) cannot be empty.
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
			if (nodes_empty(nodes))
3214 case MPOL_INTERLEAVE:
3215 case MPOL_WEIGHTED_INTERLEAVE:
3217 * Default to online nodes with memory if no nodelist
3220 nodes = node_states[N_MEMORY];
3224 * Don't allow a nodelist; mpol_new() checks flags
		 * Insist on an empty nodelist
3236 case MPOL_PREFERRED_MANY:
3239 * Insist on a nodelist
3248 * Currently, we only support two mutually exclusive
3251 if (!strcmp(flags, "static"))
3252 mode_flags |= MPOL_F_STATIC_NODES;
3253 else if (!strcmp(flags, "relative"))
3254 mode_flags |= MPOL_F_RELATIVE_NODES;
3259 new = mpol_new(mode, mode_flags, &nodes);
3264 * Save nodes for mpol_to_str() to show the tmpfs mount options
3265 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3267 if (mode != MPOL_PREFERRED) {
3269 } else if (nodelist) {
3270 nodes_clear(new->nodes);
3271 node_set(first_node(nodes), new->nodes);
3273 new->mode = MPOL_LOCAL;
3277 * Save nodes for contextualization: this will be used to "clone"
3278 * the mempolicy in a specific context [cpuset] at a later time.
3280 new->w.user_nodemask = nodes;
3285 /* Restore string for error message */
3294 #endif /* CONFIG_TMPFS */
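/*
 * Example (illustrative): tmpfs "mpol=" mount options accepted by
 * mpol_parse_str() above, following <mode>[=<flags>][:<nodelist>]:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind=static:0,2 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer:1 tmpfs /mnt
 *
 * mpol_to_str() below formats the policy back into this form for
 * /proc/mounts.
 */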
3297 * mpol_to_str - format a mempolicy structure for printing
3298 * @buffer: to contain formatted mempolicy string
3299 * @maxlen: length of @buffer
3300 * @pol: pointer to mempolicy to be formatted
3302 * Convert @pol into a string. If @buffer is too short, truncate the string.
3303 * Recommend a @maxlen of at least 51 for the longest mode, "weighted
 * interleave", plus the longest flags, "relative|balancing", and to
3305 * display at least a few node ids.
3307 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3310 nodemask_t nodes = NODE_MASK_NONE;
3311 unsigned short mode = MPOL_DEFAULT;
3312 unsigned short flags = 0;
3315 pol != &default_policy &&
3316 !(pol >= &preferred_node_policy[0] &&
3317 pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
3326 case MPOL_PREFERRED:
3327 case MPOL_PREFERRED_MANY:
3329 case MPOL_INTERLEAVE:
3330 case MPOL_WEIGHTED_INTERLEAVE:
3335 snprintf(p, maxlen, "unknown");
3339 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3341 if (flags & MPOL_MODE_FLAGS) {
3342 p += snprintf(p, buffer + maxlen - p, "=");
3345 * Static and relative are mutually exclusive.
3347 if (flags & MPOL_F_STATIC_NODES)
3348 p += snprintf(p, buffer + maxlen - p, "static");
3349 else if (flags & MPOL_F_RELATIVE_NODES)
3350 p += snprintf(p, buffer + maxlen - p, "relative");
3352 if (flags & MPOL_F_NUMA_BALANCING) {
3353 if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
3354 p += snprintf(p, buffer + maxlen - p, "|");
3355 p += snprintf(p, buffer + maxlen - p, "balancing");
3359 if (!nodes_empty(nodes))
3360 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3361 nodemask_pr_args(&nodes));
3365 struct iw_node_attr {
3366 struct kobj_attribute kobj_attr;
3370 static ssize_t node_show(struct kobject *kobj, struct kobj_attribute *attr,
3373 struct iw_node_attr *node_attr;
3376 node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
3377 weight = get_il_weight(node_attr->nid);
3378 return sysfs_emit(buf, "%d\n", weight);
3381 static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
3382 const char *buf, size_t count)
3384 struct iw_node_attr *node_attr;
3389 node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
3390 if (count == 0 || sysfs_streq(buf, ""))
3392 else if (kstrtou8(buf, 0, &weight))
3395 new = kzalloc(nr_node_ids, GFP_KERNEL);
	mutex_lock(&iw_table_lock);
	old = rcu_dereference_protected(iw_table,
					lockdep_is_held(&iw_table_lock));
	if (old)
		memcpy(new, old, nr_node_ids);
	new[node_attr->nid] = weight;
	rcu_assign_pointer(iw_table, new);
	mutex_unlock(&iw_table_lock);
	/* wait for readers of the old table to drain before freeing it */
	synchronize_rcu();
	kfree(old);
	return count;
}
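/*
 * Example (illustrative): the per-node weights written here are exposed
 * under /sys/kernel/mm/mempolicy/weighted_interleave/; writing an empty
 * string resets a node to the default weight:
 *
 *	# cat /sys/kernel/mm/mempolicy/weighted_interleave/node0
 *	# echo 4 > /sys/kernel/mm/mempolicy/weighted_interleave/node0
 *	# echo > /sys/kernel/mm/mempolicy/weighted_interleave/node0
 */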
3412 static struct iw_node_attr **node_attrs;
3414 static void sysfs_wi_node_release(struct iw_node_attr *node_attr,
3415 struct kobject *parent)
3419 sysfs_remove_file(parent, &node_attr->kobj_attr.attr);
3420 kfree(node_attr->kobj_attr.attr.name);
3424 static void sysfs_wi_release(struct kobject *wi_kobj)
3428 for (i = 0; i < nr_node_ids; i++)
3429 sysfs_wi_node_release(node_attrs[i], wi_kobj);
3430 kobject_put(wi_kobj);
3433 static const struct kobj_type wi_ktype = {
3434 .sysfs_ops = &kobj_sysfs_ops,
3435 .release = sysfs_wi_release,
3438 static int add_weight_node(int nid, struct kobject *wi_kobj)
3440 struct iw_node_attr *node_attr;
3443 node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
3447 name = kasprintf(GFP_KERNEL, "node%d", nid);
3453 sysfs_attr_init(&node_attr->kobj_attr.attr);
3454 node_attr->kobj_attr.attr.name = name;
3455 node_attr->kobj_attr.attr.mode = 0644;
3456 node_attr->kobj_attr.show = node_show;
3457 node_attr->kobj_attr.store = node_store;
3458 node_attr->nid = nid;
3460 if (sysfs_create_file(wi_kobj, &node_attr->kobj_attr.attr)) {
3461 kfree(node_attr->kobj_attr.attr.name);
3463 pr_err("failed to add attribute to weighted_interleave\n");
3467 node_attrs[nid] = node_attr;
3471 static int add_weighted_interleave_group(struct kobject *root_kobj)
3473 struct kobject *wi_kobj;
3476 wi_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
3480 err = kobject_init_and_add(wi_kobj, &wi_ktype, root_kobj,
3481 "weighted_interleave");
3487 for_each_node_state(nid, N_POSSIBLE) {
3488 err = add_weight_node(nid, wi_kobj);
3490 pr_err("failed to add sysfs [node%d]\n", nid);
3495 kobject_put(wi_kobj);
3499 static void mempolicy_kobj_release(struct kobject *kobj)
3503 mutex_lock(&iw_table_lock);
3504 old = rcu_dereference_protected(iw_table,
3505 lockdep_is_held(&iw_table_lock));
3506 rcu_assign_pointer(iw_table, NULL);
3507 mutex_unlock(&iw_table_lock);
3514 static const struct kobj_type mempolicy_ktype = {
3515 .release = mempolicy_kobj_release
3518 static int __init mempolicy_sysfs_init(void)
3521 static struct kobject *mempolicy_kobj;
3523 mempolicy_kobj = kzalloc(sizeof(*mempolicy_kobj), GFP_KERNEL);
3524 if (!mempolicy_kobj) {
3529 node_attrs = kcalloc(nr_node_ids, sizeof(struct iw_node_attr *),
3536 err = kobject_init_and_add(mempolicy_kobj, &mempolicy_ktype, mm_kobj,
3541 err = add_weighted_interleave_group(mempolicy_kobj);
3543 pr_err("mempolicy sysfs structure failed to initialize\n");
3544 kobject_put(mempolicy_kobj);
3552 kfree(mempolicy_kobj);
3554 pr_err("failed to add mempolicy kobject to the system\n");
3558 late_initcall(mempolicy_sysfs_init);
3559 #endif /* CONFIG_SYSFS */