Git Repo - linux.git/commitdiff
mm: remove pfn_valid_within() and CONFIG_HOLES_IN_ZONE
author: Mike Rapoport <[email protected]>
Wed, 8 Sep 2021 02:54:52 +0000 (19:54 -0700)
committer: Linus Torvalds <[email protected]>
Wed, 8 Sep 2021 18:50:22 +0000 (11:50 -0700)
Patch series "mm: remove pfn_valid_within() and CONFIG_HOLES_IN_ZONE".

After recent updates to freeing unused parts of the memory map, no
architecture can have holes in the memory map within a pageblock.  This
makes pfn_valid_within() check and CONFIG_HOLES_IN_ZONE configuration
option redundant.

The first patch removes them both in a mechanical way, and the second patch
simplifies memory_hotplug::test_pages_in_a_zone(), which had
pfn_valid_within() surrounded by more logic than a simple "if".

This patch (of 2):

After recent changes in the freeing of the unused parts of the memory map and
the rework of pfn_valid() in arm and arm64, there are no architectures that can
have holes in the memory map within a pageblock, and so nothing can enable
CONFIG_HOLES_IN_ZONE, which guards the non-trivial implementation of
pfn_valid_within().

With that, pfn_valid_within() is always hardwired to 1 and can be
completely removed.

Remove calls to pfn_valid_within() and CONFIG_HOLES_IN_ZONE.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mike Rapoport <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: "Rafael J. Wysocki" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
drivers/base/node.c
include/linux/mmzone.h
mm/Kconfig
mm/compaction.c
mm/memory_hotplug.c
mm/page_alloc.c
mm/page_isolation.c
mm/page_owner.c

index 4a4ae868ad9f8482285220f97fa3ec8dea2526c9..8ec6b7dfbb0f975a7919c0384920c7b7a510c287 100644 (file)
@@ -768,8 +768,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static int __ref get_nid_for_pfn(unsigned long pfn)
 {
-       if (!pfn_valid_within(pfn))
-               return -1;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
        if (system_state < SYSTEM_RUNNING)
                return early_pfn_to_nid(pfn);
index fcb535560028fc59a82a4b1e56c6db728fe5bf37..ee3a868305196f739f7fef324ef07766cd5834df 100644 (file)
@@ -1525,18 +1525,6 @@ void sparse_init(void);
 #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
- */
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
-#endif
-
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
index 40a9bfcd5062e1d06313125bdec31d7c4f403553..14d5d2837737c81b227f19a8fbeb40671499eed0 100644 (file)
@@ -96,9 +96,6 @@ config HAVE_FAST_GUP
        depends on MMU
        bool
 
-config HOLES_IN_ZONE
-       bool
-
 # Don't discard allocated memory used to track "memory" and "reserved" memblocks
 # after early boot, so it can still be used to test for validity of memory.
 # Also, memblocks are updated with memory hot(un)plug.
index 621508e0ecd5da3c4b82ea8d69832375c1bb3e50..ed37e1cb4369e6bec3496300da10901b483b4b93 100644 (file)
@@ -306,16 +306,14 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
         * is necessary for the block to be a migration source/target.
         */
        do {
-               if (pfn_valid_within(pfn)) {
-                       if (check_source && PageLRU(page)) {
-                               clear_pageblock_skip(page);
-                               return true;
-                       }
+               if (check_source && PageLRU(page)) {
+                       clear_pageblock_skip(page);
+                       return true;
+               }
 
-                       if (check_target && PageBuddy(page)) {
-                               clear_pageblock_skip(page);
-                               return true;
-                       }
+               if (check_target && PageBuddy(page)) {
+                       clear_pageblock_skip(page);
+                       return true;
                }
 
                page += (1 << PAGE_ALLOC_COSTLY_ORDER);
@@ -585,8 +583,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                        break;
 
                nr_scanned++;
-               if (!pfn_valid_within(blockpfn))
-                       goto isolate_fail;
 
                /*
                 * For compound pages such as THP and hugetlbfs, we can save
@@ -885,8 +881,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        cond_resched();
                }
 
-               if (!pfn_valid_within(low_pfn))
-                       goto isolate_fail;
                nr_scanned++;
 
                page = pfn_to_page(low_pfn);
index 86c3af79e874e9ff986276e1b812d2892fdc863b..8d3376f66f01d400d87cdfb14b1dbf97ce44d500 100644 (file)
@@ -1308,10 +1308,6 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
                for (; pfn < sec_end_pfn && pfn < end_pfn;
                     pfn += MAX_ORDER_NR_PAGES) {
                        i = 0;
-                       /* This is just a CONFIG_HOLES_IN_ZONE check.*/
-                       while ((i < MAX_ORDER_NR_PAGES) &&
-                               !pfn_valid_within(pfn + i))
-                               i++;
                        if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
                                continue;
                        /* Check if we got outside of the zone */
index eeb3a9cb36bb4ff417247501d53c7b0796db6045..79a2fc5b6c6fa8d27251b7ead86b930ca8771716 100644 (file)
@@ -594,8 +594,6 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 static int page_is_consistent(struct zone *zone, struct page *page)
 {
-       if (!pfn_valid_within(page_to_pfn(page)))
-               return 0;
        if (zone != page_zone(page))
                return 0;
 
@@ -1025,16 +1023,12 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
        if (order >= MAX_ORDER - 2)
                return false;
 
-       if (!pfn_valid_within(buddy_pfn))
-               return false;
-
        combined_pfn = buddy_pfn & pfn;
        higher_page = page + (combined_pfn - pfn);
        buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
        higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 
-       return pfn_valid_within(buddy_pfn) &&
-              page_is_buddy(higher_page, higher_buddy, order + 1);
+       return page_is_buddy(higher_page, higher_buddy, order + 1);
 }
 
 /*
@@ -1095,8 +1089,6 @@ continue_merging:
                buddy_pfn = __find_buddy_pfn(pfn, order);
                buddy = page + (buddy_pfn - pfn);
 
-               if (!pfn_valid_within(buddy_pfn))
-                       goto done_merging;
                if (!page_is_buddy(page, buddy, order))
                        goto done_merging;
                /*
@@ -1754,9 +1746,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 /*
  * Check that the whole (or subset of) a pageblock given by the interval of
  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
- * with the migration of free compaction scanner. The scanners then need to
- * use only pfn_valid_within() check for arches that allow holes within
- * pageblocks.
+ * with the migration of free compaction scanner.
  *
  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
  *
@@ -1872,8 +1862,6 @@ static inline void __init pgdat_init_report_one_done(void)
  */
 static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
-       if (!pfn_valid_within(pfn))
-               return false;
        if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
                return false;
        return true;
@@ -2520,11 +2508,6 @@ static int move_freepages(struct zone *zone,
        int pages_moved = 0;
 
        for (pfn = start_pfn; pfn <= end_pfn;) {
-               if (!pfn_valid_within(pfn)) {
-                       pfn++;
-                       continue;
-               }
-
                page = pfn_to_page(pfn);
                if (!PageBuddy(page)) {
                        /*
@@ -8814,9 +8797,6 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
        }
 
        for (; iter < pageblock_nr_pages - offset; iter++) {
-               if (!pfn_valid_within(pfn + iter))
-                       continue;
-
                page = pfn_to_page(pfn + iter);
 
                /*
index bddf788f45bff8cb2f1768dc3930a24355566658..471e3a13b5411da6809de7ff7d510e65c1e6cace 100644 (file)
@@ -93,8 +93,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);
 
-                       if (pfn_valid_within(buddy_pfn) &&
-                           !is_migrate_isolate_page(buddy)) {
+                       if (!is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
@@ -250,10 +249,6 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
        struct page *page;
 
        while (pfn < end_pfn) {
-               if (!pfn_valid_within(pfn)) {
-                       pfn++;
-                       continue;
-               }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
index f51a57e92aa380cbea774ce97bebcaf3de7e4740..62402d22539b8e47bc2996cae5f86b63138ae593 100644 (file)
@@ -276,9 +276,6 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                pageblock_mt = get_pageblock_migratetype(page);
 
                for (; pfn < block_end_pfn; pfn++) {
-                       if (!pfn_valid_within(pfn))
-                               continue;
-
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);
 
@@ -479,10 +476,6 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                        continue;
                }
 
-               /* Check for holes within a MAX_ORDER area */
-               if (!pfn_valid_within(pfn))
-                       continue;
-
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);
@@ -560,14 +553,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                block_end_pfn = min(block_end_pfn, end_pfn);
 
                for (; pfn < block_end_pfn; pfn++) {
-                       struct page *page;
+                       struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;
 
-                       if (!pfn_valid_within(pfn))
-                               continue;
-
-                       page = pfn_to_page(pfn);
-
                        if (page_zone(page) != zone)
                                continue;
 
This page took 0.083204 seconds and 4 git commands to generate.