// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
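
/*
 * Usage sketch (not part of this file): the two helpers above back the
 * for_each_online_pgdat() iterator in <linux/mmzone.h>, which expands
 * to roughly the following (exact definition may differ by version):
 *
 *	struct pglist_data *pgdat;
 *
 *	for (pgdat = first_online_pgdat(); pgdat;
 *	     pgdat = next_online_pgdat(pgdat))
 *		...operate on one online node...
 */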

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
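
/*
 * Usage sketch (not part of this file): for_each_zone() in
 * <linux/mmzone.h> walks every zone on every online node via
 * next_zone(), along the lines of:
 *
 *	struct zone *zone;
 *
 *	for (zone = (first_online_pgdat())->node_zones; zone;
 *	     zone = next_zone(zone))
 *		...visit each zone, populated or not...
 */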

/*
 * Return true if the zone referenced by @zref sits on a node that is
 * set in @nodes.  Without NUMA there is only one node, so every zone
 * trivially qualifies.
 */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
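
/*
 * Usage sketch (not part of this file): callers rarely invoke
 * __next_zones_zonelist() directly.  The zonelist iterators in
 * <linux/mmzone.h>, e.g. for_each_zone_zonelist_nodemask(), are built
 * on top of it, roughly:
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *					highest_zoneidx, nodemask)
 *		...consider each eligible zone for the allocation...
 */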

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * Check that the memmap entry for @pfn is self-consistent: on
 * architectures with holes in the memory map, a struct page inside a
 * hole may be uninitialised, in which case its stored pfn or zone will
 * not match what the caller expects.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return false;

	if (page_zone(page) != zone)
		return false;

	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
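
/*
 * Usage sketch (not part of this file): a pfn walker on such an
 * architecture would validate each page before trusting it, along
 * these lines (mm/vmstat.c does something similar when counting
 * pageblocks):
 *
 *	page = pfn_to_page(pfn);
 *	if (!memmap_valid_within(pfn, page, zone))
 *		continue;	// skip uninitialised hole entries
 */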

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}
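
/*
 * Note (not part of this file): for_each_lru() in <linux/mmzone.h>
 * simply walks the enum lru_list values, roughly:
 *
 *	#define for_each_lru(lru) \
 *		for (lru = 0; lru < NR_LRU_LISTS; lru++)
 *
 * so after lruvec_init() every list, from LRU_INACTIVE_ANON through
 * LRU_UNEVICTABLE, is an empty list head.
 */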

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically replace the last_cpupid field packed into page->flags and
 * return the previous value.  The cmpxchg() loop retries if another
 * CPU modified page->flags between the read and the update.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif
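
/*
 * Usage sketch (not part of this file, an illustration only): the NUMA
 * balancing fault path records which (cpu, pid) last touched a page,
 * assuming the cpu_pid_to_cpupid() helper from <linux/mm.h>:
 *
 *	int this_cpupid = cpu_pid_to_cpupid(raw_smp_processor_id(),
 *					    current->pid);
 *	int last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 *	// last_cpupid identifies the previous accessor of the page
 */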