1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_HUGETLB_H
3 #define _LINUX_HUGETLB_H
6 #include <linux/mm_types.h>
7 #include <linux/mmdebug.h>
9 #include <linux/hugetlb_inline.h>
10 #include <linux/cgroup.h>
11 #include <linux/page_ref.h>
12 #include <linux/list.h>
13 #include <linux/kref.h>
14 #include <linux/pgtable.h>
15 #include <linux/gfp.h>
16 #include <linux/userfaultfd_k.h>
23 void free_huge_folio(struct folio *folio);
25 #ifdef CONFIG_HUGETLB_PAGE
27 #include <linux/pagemap.h>
28 #include <linux/shm.h>
29 #include <asm/tlbflush.h>
32 * For a HugeTLB page, there is more metadata to save in the struct page. But
33 * the head struct page cannot meet our needs, so we have to abuse other tail
34 * struct pages to store the metadata.
36 #define __NR_USED_SUBPAGE 3
38 struct hugepage_subpool {
41 long max_hpages; /* Maximum huge pages or -1 if no maximum. */
42 long used_hpages; /* Used count against maximum, includes */
43 /* both allocated and reserved pages. */
44 struct hstate *hstate;
45 long min_hpages; /* Minimum huge pages or -1 if no minimum. */
46 long rsv_hpages; /* Pages reserved against global pool to */
47 /* satisfy minimum size. */
53 struct list_head regions;
54 long adds_in_progress;
55 struct list_head region_cache;
56 long region_cache_count;
57 struct rw_semaphore rw_sema;
58 #ifdef CONFIG_CGROUP_HUGETLB
60 * On private mappings, the counter to uncharge reservations is stored
61 * here. If these fields are 0, then either the mapping is shared, or
62 * cgroup accounting is disabled for this resv_map.
64 struct page_counter *reservation_counter;
65 unsigned long pages_per_hpage;
66 struct cgroup_subsys_state *css;
71 * Region tracking -- allows tracking of reservations and instantiated pages
72 * across the pages in a mapping.
74 * The region data structures are embedded into a resv_map and protected
75 * by a resv_map's lock. The set of regions within the resv_map represent
76 * reservations for huge pages, or huge pages that have already been
77 * instantiated within the map. The from and to elements are huge page
78 * indices into the associated mapping. from indicates the starting index
79 * of the region. to represents the first index past the end of the region.
81 * For example, a file region structure with from == 0 and to == 4 represents
82 * four huge pages in a mapping. It is important to note that the to element
83 * represents the first index past the end of the region. This is used in
84 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region (see the illustrative sketch below).
86 * Interval notation of the form [from, to) will be used to indicate that
87 * the endpoint from is inclusive and to is exclusive.
90 struct list_head link;
93 #ifdef CONFIG_CGROUP_HUGETLB
95 * On shared mappings, each reserved region appears as a struct
96 * file_region in resv_map. These fields hold the info needed to
97 * uncharge each reservation.
99 struct page_counter *reservation_counter;
100 struct cgroup_subsys_state *css;
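/*
 * Illustrative sketch only, not part of the kernel API: shows how the
 * [from, to) convention above yields the number of huge pages a region
 * spans. The helper name is hypothetical.
 */
static inline long file_region_span_sketch(long from, long to)
{
	/* 'to' is exclusive, so the span is a plain difference. */
	return to - from;	/* from == 0, to == 4  ->  4 huge pages */
}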
104 struct hugetlb_vma_lock {
106 struct rw_semaphore rw_sema;
107 struct vm_area_struct *vma;
110 extern struct resv_map *resv_map_alloc(void);
111 void resv_map_release(struct kref *ref);
113 extern spinlock_t hugetlb_lock;
114 extern int hugetlb_max_hstate __read_mostly;
115 #define for_each_hstate(h) \
116 for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
118 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
120 void hugepage_put_subpool(struct hugepage_subpool *spool);
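/*
 * Illustrative sketch, not part of this header: how a filesystem-style
 * caller pairs subpool creation with teardown (the in-tree caller is
 * hugetlbfs_fill_super()). The helper name is hypothetical.
 */
static inline struct hugepage_subpool *subpool_setup_sketch(struct hstate *h,
							    long max_hpages,
							    long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
	if (!spool)
		return NULL;
	/* ... use the pool; drop the reference with hugepage_put_subpool() */
	return spool;
}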
122 void hugetlb_dup_vma_private(struct vm_area_struct *vma);
123 void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
124 int move_hugetlb_page_tables(struct vm_area_struct *vma,
125 struct vm_area_struct *new_vma,
126 unsigned long old_addr, unsigned long new_addr,
128 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
129 struct vm_area_struct *, struct vm_area_struct *);
130 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
131 unsigned long address, unsigned int flags,
132 unsigned int *page_mask);
133 void unmap_hugepage_range(struct vm_area_struct *,
134 unsigned long, unsigned long, struct page *,
136 void __unmap_hugepage_range(struct mmu_gather *tlb,
137 struct vm_area_struct *vma,
138 unsigned long start, unsigned long end,
139 struct page *ref_page, zap_flags_t zap_flags);
140 void hugetlb_report_meminfo(struct seq_file *);
141 int hugetlb_report_node_meminfo(char *buf, int len, int nid);
142 void hugetlb_show_meminfo_node(int nid);
143 unsigned long hugetlb_total_pages(void);
144 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
145 unsigned long address, unsigned int flags);
146 #ifdef CONFIG_USERFAULTFD
147 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
148 struct vm_area_struct *dst_vma,
149 unsigned long dst_addr,
150 unsigned long src_addr,
152 struct folio **foliop);
153 #endif /* CONFIG_USERFAULTFD */
154 bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
155 struct vm_area_struct *vma,
156 vm_flags_t vm_flags);
157 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
159 bool isolate_hugetlb(struct folio *folio, struct list_head *list);
160 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
161 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
162 bool *migratable_cleared);
163 void folio_putback_active_hugetlb(struct folio *folio);
164 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
165 void hugetlb_fix_reserve_counts(struct inode *inode);
166 extern struct mutex *hugetlb_fault_mutex_table;
167 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
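/*
 * Illustrative sketch, not part of this header: fault and userfaultfd-copy
 * paths serialize on (mapping, index) via the hash above, roughly:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... look up or instantiate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */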
169 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
170 unsigned long addr, pud_t *pud);
171 bool hugetlbfs_pagecache_present(struct hstate *h,
172 struct vm_area_struct *vma,
173 unsigned long address);
175 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
177 extern int sysctl_hugetlb_shm_group;
178 extern struct list_head huge_boot_pages[MAX_NUMNODES];
182 #ifndef CONFIG_HIGHPTE
184 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
185 * which may go down to the lowest PTE level in their huge_pte_offset() and
186 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
188 static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
190 return pte_offset_kernel(pmd, address);
192 static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
193 unsigned long address)
195 return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
199 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
200 unsigned long addr, unsigned long sz);
202 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
203 * Returns the pte_t* if found, or NULL if the address is not mapped.
205 * IMPORTANT: this function should normally not be called directly; it is
206 * only a common interface on top of which arch-specific walkers are
207 * implemented. Please use hugetlb_walk() instead, because that will attempt
208 * to verify the locking for you.
210 * Since this function will walk all the pgtable pages (including not only
211 * high-level pgtable pages, but also PUD entries that can be unshared
212 * concurrently for VM_SHARED), the caller of this function is responsible
213 * for its thread safety. One can follow this rule:
215 * (1) For private mappings: pmd unsharing is not possible, so holding the
216 * mmap_lock for either read or write is sufficient. Most callers
217 * already hold the mmap_lock, so normally, no special action is
220 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
221 * pgtable page can go away from under us! It can be done by a pmd
222 * unshare with a follow-up munmap() in the other process), then we
225 * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
226 * won't happen upon the range (it also makes sure the pte_t we
227 * read is the right and stable one), or,
229 * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
230 * sure even if unshare happened the racy unmap() will wait until
231 * i_mmap_rwsem is released.
233 * Option (2.1) is the safest, which guarantees pte stability from the pmd
234 * sharing pov until the vma lock is released. Option (2.2) doesn't prevent
235 * a concurrent pmd unshare, but it makes sure the pgtable page is safe to
238 pte_t *huge_pte_offset(struct mm_struct *mm,
239 unsigned long addr, unsigned long sz);
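/*
 * Illustrative sketch, not part of this header: the typical rule (2.1)
 * pattern for a shared mapping, using the vma lock to stabilize the walk
 * (hugetlb_walk() is defined near the end of this header):
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
 *	if (ptep)
 *		... read the huge PTE while the lock is held ...
 *	hugetlb_vma_unlock_read(vma);
 */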
240 unsigned long hugetlb_mask_last_page(struct hstate *h);
241 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
242 unsigned long addr, pte_t *ptep);
243 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
244 unsigned long *start, unsigned long *end);
246 extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
247 unsigned long *begin, unsigned long *end);
248 extern void __hugetlb_zap_end(struct vm_area_struct *vma,
249 struct zap_details *details);
251 static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
252 unsigned long *start, unsigned long *end)
254 if (is_vm_hugetlb_page(vma))
255 __hugetlb_zap_begin(vma, start, end);
258 static inline void hugetlb_zap_end(struct vm_area_struct *vma,
259 struct zap_details *details)
261 if (is_vm_hugetlb_page(vma))
262 __hugetlb_zap_end(vma, details);
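/*
 * Illustrative sketch, not part of this header: zap paths bracket the
 * actual unmap with these helpers so the hugetlb locks are held across the
 * range adjustment and the unmap itself, roughly:
 *
 *	hugetlb_zap_begin(vma, &start, &end);
 *	... unmap the [start, end) range ...
 *	hugetlb_zap_end(vma, details);
 */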
265 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
266 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
267 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
268 void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
269 int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
270 void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
271 void hugetlb_vma_lock_release(struct kref *kref);
272 long hugetlb_change_protection(struct vm_area_struct *vma,
273 unsigned long address, unsigned long end, pgprot_t newprot,
274 unsigned long cp_flags);
275 bool is_hugetlb_entry_migration(pte_t pte);
276 bool is_hugetlb_entry_hwpoisoned(pte_t pte);
277 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
279 #else /* !CONFIG_HUGETLB_PAGE */
281 static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
285 static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
289 static inline unsigned long hugetlb_total_pages(void)
294 static inline struct address_space *hugetlb_folio_mapping_lock_write(
300 static inline int huge_pmd_unshare(struct mm_struct *mm,
301 struct vm_area_struct *vma,
302 unsigned long addr, pte_t *ptep)
307 static inline void adjust_range_if_pmd_sharing_possible(
308 struct vm_area_struct *vma,
309 unsigned long *start, unsigned long *end)
313 static inline void hugetlb_zap_begin(
314 struct vm_area_struct *vma,
315 unsigned long *start, unsigned long *end)
319 static inline void hugetlb_zap_end(
320 struct vm_area_struct *vma,
321 struct zap_details *details)
325 static inline int copy_hugetlb_page_range(struct mm_struct *dst,
326 struct mm_struct *src,
327 struct vm_area_struct *dst_vma,
328 struct vm_area_struct *src_vma)
334 static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
335 struct vm_area_struct *new_vma,
336 unsigned long old_addr,
337 unsigned long new_addr,
344 static inline void hugetlb_report_meminfo(struct seq_file *m)
348 static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
353 static inline void hugetlb_show_meminfo_node(int nid)
357 static inline int prepare_hugepage_range(struct file *file,
358 unsigned long addr, unsigned long len)
363 static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
367 static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
371 static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
375 static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
379 static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
384 static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
388 static inline int is_hugepage_only_range(struct mm_struct *mm,
389 unsigned long addr, unsigned long len)
394 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
395 unsigned long addr, unsigned long end,
396 unsigned long floor, unsigned long ceiling)
401 #ifdef CONFIG_USERFAULTFD
402 static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
403 struct vm_area_struct *dst_vma,
404 unsigned long dst_addr,
405 unsigned long src_addr,
407 struct folio **foliop)
412 #endif /* CONFIG_USERFAULTFD */
414 static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
420 static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
425 static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
430 static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
431 bool *migratable_cleared)
436 static inline void folio_putback_active_hugetlb(struct folio *folio)
440 static inline void move_hugetlb_state(struct folio *old_folio,
441 struct folio *new_folio, int reason)
445 static inline long hugetlb_change_protection(
446 struct vm_area_struct *vma, unsigned long address,
447 unsigned long end, pgprot_t newprot,
448 unsigned long cp_flags)
453 static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
454 struct vm_area_struct *vma, unsigned long start,
455 unsigned long end, struct page *ref_page,
456 zap_flags_t zap_flags)
461 static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
462 struct vm_area_struct *vma, unsigned long address,
469 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
471 #endif /* !CONFIG_HUGETLB_PAGE */
474 static inline int pgd_write(pgd_t pgd)
481 #define HUGETLB_ANON_FILE "anon_hugepage"
485 * The file will be used as a shm file, so shmfs accounting rules
488 HUGETLB_SHMFS_INODE = 1,
490 * The file is being created on the internal vfs mount and shmfs
491 * accounting rules do not apply
493 HUGETLB_ANONHUGE_INODE = 2,
496 #ifdef CONFIG_HUGETLBFS
497 struct hugetlbfs_sb_info {
498 long max_inodes; /* inodes allowed */
499 long free_inodes; /* inodes free */
500 spinlock_t stat_lock;
501 struct hstate *hstate;
502 struct hugepage_subpool *spool;
508 static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
510 return sb->s_fs_info;
513 struct hugetlbfs_inode_info {
514 struct inode vfs_inode;
518 static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
520 return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
523 extern const struct vm_operations_struct hugetlb_vm_ops;
524 struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
525 int creat_flags, int page_size_log);
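/*
 * Illustrative sketch, not part of this header: mmap(MAP_HUGETLB)-style
 * callers create an anonymous hugetlbfs file roughly like this (argument
 * values shown are indicative only):
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  HUGETLB_ANONHUGE_INODE, page_size_log);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */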
527 static inline bool is_file_hugepages(const struct file *file)
529 return file->f_op->fop_flags & FOP_HUGE_PAGES;
532 static inline struct hstate *hstate_inode(struct inode *i)
534 return HUGETLBFS_SB(i->i_sb)->hstate;
536 #else /* !CONFIG_HUGETLBFS */
538 #define is_file_hugepages(file) false
539 static inline struct file *
540 hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
541 int creat_flags, int page_size_log)
543 return ERR_PTR(-ENOSYS);
546 static inline struct hstate *hstate_inode(struct inode *i)
550 #endif /* !CONFIG_HUGETLBFS */
552 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
553 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
554 unsigned long len, unsigned long pgoff,
555 unsigned long flags);
556 #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
559 generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
560 unsigned long len, unsigned long pgoff,
561 unsigned long flags);
564 * hugetlb page specific state flags. These flags are located in page.private
565 * of the hugetlb head page. Functions created via the below macros should be
566 * used to manipulate these flags.
568 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
569 * allocation time. Cleared when page is fully instantiated. Free
570 * routine checks flag to restore a reservation on error paths.
571 * Synchronization: Examined or modified by code that knows it has
572 * the only reference to the page, i.e. after allocation but before use
573 * or when the page is being freed.
574 * HPG_migratable - Set after a newly allocated page is added to the page
575 * cache and/or page tables. Indicates the page is a candidate for
577 * Synchronization: Initially set after new page allocation with no
578 * locking. When examined and modified during migration processing
579 * (isolate, migrate, putback) the hugetlb_lock is held.
580 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
581 * allocator. Typically used for migration target pages when no pages
582 * are available in the pool. The hugetlb free page path will
583 * immediately free pages with this flag set to the buddy allocator.
584 * Synchronization: Can be set after huge page allocation from buddy when
585 * code knows it has the only reference. All other examinations and
586 * modifications require hugetlb_lock.
587 * HPG_freed - Set when page is on the free lists.
588 * Synchronization: hugetlb_lock held for examination and modification.
589 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
590 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
591 * that is not tracked by raw_hwp_page list.
593 enum hugetlb_page_flags {
594 HPG_restore_reserve = 0,
598 HPG_vmemmap_optimized,
599 HPG_raw_hwp_unreliable,
604 * Macros to create test, set and clear function definitions for
605 * hugetlb specific page flags.
607 #ifdef CONFIG_HUGETLB_PAGE
608 #define TESTHPAGEFLAG(uname, flname) \
609 static __always_inline \
610 bool folio_test_hugetlb_##flname(struct folio *folio) \
611 { void *private = &folio->private; \
612 return test_bit(HPG_##flname, private); \
615 #define SETHPAGEFLAG(uname, flname) \
616 static __always_inline \
617 void folio_set_hugetlb_##flname(struct folio *folio) \
618 { void *private = &folio->private; \
619 set_bit(HPG_##flname, private); \
622 #define CLEARHPAGEFLAG(uname, flname) \
623 static __always_inline \
624 void folio_clear_hugetlb_##flname(struct folio *folio) \
625 { void *private = &folio->private; \
626 clear_bit(HPG_##flname, private); \
629 #define TESTHPAGEFLAG(uname, flname) \
631 folio_test_hugetlb_##flname(struct folio *folio) \
634 #define SETHPAGEFLAG(uname, flname) \
636 folio_set_hugetlb_##flname(struct folio *folio) \
639 #define CLEARHPAGEFLAG(uname, flname) \
641 folio_clear_hugetlb_##flname(struct folio *folio) \
645 #define HPAGEFLAG(uname, flname) \
646 TESTHPAGEFLAG(uname, flname) \
647 SETHPAGEFLAG(uname, flname) \
648 CLEARHPAGEFLAG(uname, flname) \
651 * Create functions associated with hugetlb page flags
653 HPAGEFLAG(RestoreReserve, restore_reserve)
654 HPAGEFLAG(Migratable, migratable)
655 HPAGEFLAG(Temporary, temporary)
656 HPAGEFLAG(Freed, freed)
657 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
658 HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
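/*
 * Illustrative sketch only: each HPAGEFLAG() invocation above generates
 * folio_test/set/clear_hugetlb_<flag>() helpers. The function below is
 * hypothetical and merely demonstrates calling one of them.
 */
static inline bool hugetlb_folio_is_temporary_sketch(struct folio *folio)
{
	/* Temporary folios bypass the pool and go straight back to buddy. */
	return folio_test_hugetlb_temporary(folio);
}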
660 #ifdef CONFIG_HUGETLB_PAGE
662 #define HSTATE_NAME_LEN 32
663 /* Defines one hugetlb page size */
665 struct mutex resize_lock;
666 struct lock_class_key resize_key;
667 int next_nid_to_alloc;
668 int next_nid_to_free;
670 unsigned int demote_order;
672 unsigned long max_huge_pages;
673 unsigned long nr_huge_pages;
674 unsigned long free_huge_pages;
675 unsigned long resv_huge_pages;
676 unsigned long surplus_huge_pages;
677 unsigned long nr_overcommit_huge_pages;
678 struct list_head hugepage_activelist;
679 struct list_head hugepage_freelists[MAX_NUMNODES];
680 unsigned int max_huge_pages_node[MAX_NUMNODES];
681 unsigned int nr_huge_pages_node[MAX_NUMNODES];
682 unsigned int free_huge_pages_node[MAX_NUMNODES];
683 unsigned int surplus_huge_pages_node[MAX_NUMNODES];
684 char name[HSTATE_NAME_LEN];
687 struct huge_bootmem_page {
688 struct list_head list;
689 struct hstate *hstate;
692 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
693 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
694 unsigned long addr, int avoid_reserve);
695 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
696 nodemask_t *nmask, gfp_t gfp_mask,
697 bool allow_alloc_fallback);
698 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
700 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
701 unsigned long address, struct folio *folio);
704 int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
705 int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
706 bool __init hugetlb_node_alloc_supported(void);
708 void __init hugetlb_add_hstate(unsigned order);
709 bool __init arch_hugetlb_valid_size(unsigned long size);
710 struct hstate *size_to_hstate(unsigned long size);
712 #ifndef HUGE_MAX_HSTATE
713 #define HUGE_MAX_HSTATE 1
716 extern struct hstate hstates[HUGE_MAX_HSTATE];
717 extern unsigned int default_hstate_idx;
719 #define default_hstate (hstates[default_hstate_idx])
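/*
 * Illustrative sketch (hypothetical helper): walking every registered huge
 * page size with for_each_hstate(), e.g. to sum the free pool counts.
 */
static inline unsigned long hugetlb_free_pool_folios_sketch(void)
{
	struct hstate *h;
	unsigned long nr = 0;

	for_each_hstate(h)
		nr += h->free_huge_pages;

	return nr;
}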
721 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
723 return folio->_hugetlb_subpool;
726 static inline void hugetlb_set_folio_subpool(struct folio *folio,
727 struct hugepage_subpool *subpool)
729 folio->_hugetlb_subpool = subpool;
732 static inline struct hstate *hstate_file(struct file *f)
734 return hstate_inode(file_inode(f));
737 static inline struct hstate *hstate_sizelog(int page_size_log)
740 return &default_hstate;
742 if (page_size_log < BITS_PER_LONG)
743 return size_to_hstate(1UL << page_size_log);
748 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
750 return hstate_file(vma->vm_file);
753 static inline unsigned long huge_page_size(const struct hstate *h)
755 return (unsigned long)PAGE_SIZE << h->order;
758 extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
760 extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
762 static inline unsigned long huge_page_mask(struct hstate *h)
767 static inline unsigned int huge_page_order(struct hstate *h)
772 static inline unsigned huge_page_shift(struct hstate *h)
774 return h->order + PAGE_SHIFT;
777 static inline bool hstate_is_gigantic(struct hstate *h)
779 return huge_page_order(h) > MAX_PAGE_ORDER;
782 static inline unsigned int pages_per_huge_page(const struct hstate *h)
784 return 1 << h->order;
787 static inline unsigned int blocks_per_huge_page(struct hstate *h)
789 return huge_page_size(h) / 512;
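/*
 * The page cache is indexed in PAGE_SIZE units while hugetlb callers pass
 * a huge page index, hence the huge_page_order() shift below.
 */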
792 static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
793 struct address_space *mapping, pgoff_t idx)
795 return filemap_lock_folio(mapping, idx << huge_page_order(h));
798 #include <asm/hugetlb.h>
800 #ifndef is_hugepage_only_range
801 static inline int is_hugepage_only_range(struct mm_struct *mm,
802 unsigned long addr, unsigned long len)
806 #define is_hugepage_only_range is_hugepage_only_range
809 #ifndef arch_clear_hugetlb_flags
810 static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
811 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
814 #ifndef arch_make_huge_pte
815 static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
818 return pte_mkhuge(entry);
822 static inline struct hstate *folio_hstate(struct folio *folio)
824 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
825 return size_to_hstate(folio_size(folio));
828 static inline unsigned hstate_index_to_shift(unsigned index)
830 return hstates[index].order + PAGE_SHIFT;
833 static inline int hstate_index(struct hstate *h)
838 int dissolve_free_hugetlb_folio(struct folio *folio);
839 int dissolve_free_hugetlb_folios(unsigned long start_pfn,
840 unsigned long end_pfn);
842 #ifdef CONFIG_MEMORY_FAILURE
843 extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
845 static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
850 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
851 #ifndef arch_hugetlb_migration_supported
852 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
854 if ((huge_page_shift(h) == PMD_SHIFT) ||
855 (huge_page_shift(h) == PUD_SHIFT) ||
856 (huge_page_shift(h) == PGDIR_SHIFT))
863 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
869 static inline bool hugepage_migration_supported(struct hstate *h)
871 return arch_hugetlb_migration_supported(h);
875 * The movability check is different from the migration check. It
876 * determines whether or not a huge page should be placed in a movable
877 * zone. Movability of a huge page only needs to be considered if the
878 * huge page size is supported for migration in the first place:
879 * there won't be any reason for the huge page to be movable if it
880 * is not migratable to start with. Also, the huge page should be large
881 * enough to be placed in a movable zone and still be feasible to
882 * migrate. Mere presence in a movable zone does not make the
883 * migration feasible.
885 * So even though large huge page sizes like the gigantic ones
886 * are migratable, they should not be movable, because it is not
887 * feasible to migrate them out of a movable zone.
889 static inline bool hugepage_movable_supported(struct hstate *h)
891 if (!hugepage_migration_supported(h))
894 if (hstate_is_gigantic(h))
899 /* Movability of hugepages depends on migration support. */
900 static inline gfp_t htlb_alloc_mask(struct hstate *h)
902 if (hugepage_movable_supported(h))
903 return GFP_HIGHUSER_MOVABLE;
908 static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
910 gfp_t modified_mask = htlb_alloc_mask(h);
912 /* Some callers might want to enforce node */
913 modified_mask |= (gfp_mask & __GFP_THISNODE);
915 modified_mask |= (gfp_mask & __GFP_NOWARN);
917 return modified_mask;
920 static inline bool htlb_allow_alloc_fallback(int reason)
922 bool allowed_fallback = false;
925	 * Note: the memory offline, memory failure and migration syscalls are
926	 * allowed to fall back to other nodes for lack of a better choice,
927	 * which might break the per-node hugetlb pool. Other cases set
928	 * __GFP_THISNODE to avoid breaking the per-node hugetlb pool.
931 case MR_MEMORY_HOTPLUG:
932 case MR_MEMORY_FAILURE:
934 case MR_MEMPOLICY_MBIND:
935 allowed_fallback = true;
941 return allowed_fallback;
944 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
945 struct mm_struct *mm, pte_t *pte)
947 if (huge_page_size(h) == PMD_SIZE)
948 return pmd_lockptr(mm, (pmd_t *) pte);
949 VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
950 return &mm->page_table_lock;
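/*
 * Illustrative sketch, not part of this header: the usual pattern around a
 * hugetlb PTE update goes through huge_pte_lock() (defined later in this
 * header), which wraps the lockptr above:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... read or modify the huge PTE ...
 *	spin_unlock(ptl);
 */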
953 #ifndef hugepages_supported
955 * Some platforms decide whether they support huge pages at boot
956 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
957 * when there is no such support.
959 #define hugepages_supported() (HPAGE_SHIFT != 0)
962 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
964 static inline void hugetlb_count_init(struct mm_struct *mm)
966 atomic_long_set(&mm->hugetlb_usage, 0);
969 static inline void hugetlb_count_add(long l, struct mm_struct *mm)
971 atomic_long_add(l, &mm->hugetlb_usage);
974 static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
976 atomic_long_sub(l, &mm->hugetlb_usage);
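/*
 * Illustrative sketch, not part of this header: the protection-change
 * transaction built from the two helpers below (a simplified version of
 * what hugetlb_change_protection() does):
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */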
979 #ifndef huge_ptep_modify_prot_start
980 #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
981 static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
982 unsigned long addr, pte_t *ptep)
984 return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
988 #ifndef huge_ptep_modify_prot_commit
989 #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
990 static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
991 unsigned long addr, pte_t *ptep,
992 pte_t old_pte, pte_t pte)
994 unsigned long psize = huge_page_size(hstate_vma(vma));
996 set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
1001 void hugetlb_register_node(struct node *node);
1002 void hugetlb_unregister_node(struct node *node);
1006 * Check if a given raw @page in a hugepage is HWPOISON.
1008 bool is_raw_hwpoison_page_in_hugepage(struct page *page);
1010 #else /* CONFIG_HUGETLB_PAGE */
1013 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
1018 static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
1019 struct address_space *mapping, pgoff_t idx)
1024 static inline int isolate_or_dissolve_huge_page(struct page *page,
1025 struct list_head *list)
1030 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
1037 static inline struct folio *
1038 alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
1039 nodemask_t *nmask, gfp_t gfp_mask,
1040 bool allow_alloc_fallback)
1045 static inline int __alloc_bootmem_huge_page(struct hstate *h)
1050 static inline struct hstate *hstate_file(struct file *f)
1055 static inline struct hstate *hstate_sizelog(int page_size_log)
1060 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
1065 static inline struct hstate *folio_hstate(struct folio *folio)
1070 static inline struct hstate *size_to_hstate(unsigned long size)
1075 static inline unsigned long huge_page_size(struct hstate *h)
1080 static inline unsigned long huge_page_mask(struct hstate *h)
1085 static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1090 static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1095 static inline unsigned int huge_page_order(struct hstate *h)
1100 static inline unsigned int huge_page_shift(struct hstate *h)
1105 static inline bool hstate_is_gigantic(struct hstate *h)
1110 static inline unsigned int pages_per_huge_page(struct hstate *h)
1115 static inline unsigned hstate_index_to_shift(unsigned index)
1120 static inline int hstate_index(struct hstate *h)
1125 static inline int dissolve_free_hugetlb_folio(struct folio *folio)
1130 static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
1131 unsigned long end_pfn)
1136 static inline bool hugepage_migration_supported(struct hstate *h)
1141 static inline bool hugepage_movable_supported(struct hstate *h)
1146 static inline gfp_t htlb_alloc_mask(struct hstate *h)
1151 static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
1156 static inline bool htlb_allow_alloc_fallback(int reason)
1161 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
1162 struct mm_struct *mm, pte_t *pte)
1164 return &mm->page_table_lock;
1167 static inline void hugetlb_count_init(struct mm_struct *mm)
1171 static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
1175 static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1179 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
1180 unsigned long addr, pte_t *ptep)
1183 return ptep_get(ptep);
1189 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
1190 pte_t *ptep, pte_t pte, unsigned long sz)
1194 static inline void hugetlb_register_node(struct node *node)
1198 static inline void hugetlb_unregister_node(struct node *node)
1202 static inline bool hugetlbfs_pagecache_present(
1203 struct hstate *h, struct vm_area_struct *vma, unsigned long address)
1207 #endif /* CONFIG_HUGETLB_PAGE */
1209 static inline spinlock_t *huge_pte_lock(struct hstate *h,
1210 struct mm_struct *mm, pte_t *pte)
1214 ptl = huge_pte_lockptr(h, mm, pte);
1219 #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
1220 extern void __init hugetlb_cma_reserve(int order);
1222 static inline __init void hugetlb_cma_reserve(int order)
1227 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
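/* A PMD page table page shared via huge_pmd_share() holds extra references. */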
1228 static inline bool hugetlb_pmd_shared(pte_t *pte)
1230 return page_count(virt_to_page(pte)) > 1;
1233 static inline bool hugetlb_pmd_shared(pte_t *pte)
1239 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1241 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
1243 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
1246 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
1249 static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
1251 return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
1254 bool __vma_private_lock(struct vm_area_struct *vma);
1257 * Safe version of huge_pte_offset() to check the locks. See comments
1258 * above huge_pte_offset().
1260 static inline pte_t *
1261 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
1263 #if defined(CONFIG_HUGETLB_PAGE) && \
1264 defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
1265 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1268 * If pmd sharing is possible, locking is needed to safely walk the
1269 * hugetlb pgtables. More information can be found in the comment
1270 * above huge_pte_offset() in the same file.
1272 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
1274 if (__vma_shareable_lock(vma))
1275 WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
1277 &vma->vm_file->f_mapping->i_mmap_rwsem));
1279 return huge_pte_offset(vma->vm_mm, addr, sz);
1282 #endif /* _LINUX_HUGETLB_H */