]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
95144c78 KH |
2 | /* |
3 | * linux/mm/mmzone.c | |
4 | * | |
4468b8f1 | 5 | * management codes for pgdats, zones and page flags |
95144c78 KH |
6 | */ |
7 | ||
8 | ||
95144c78 | 9 | #include <linux/stddef.h> |
eb33575c | 10 | #include <linux/mm.h> |
95144c78 | 11 | #include <linux/mmzone.h> |
95144c78 KH |
12 | |
13 | struct pglist_data *first_online_pgdat(void) | |
14 | { | |
15 | return NODE_DATA(first_online_node); | |
16 | } | |
17 | ||
95144c78 KH |
18 | struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) |
19 | { | |
20 | int nid = next_online_node(pgdat->node_id); | |
21 | ||
22 | if (nid == MAX_NUMNODES) | |
23 | return NULL; | |
24 | return NODE_DATA(nid); | |
25 | } | |
95144c78 KH |
26 | |
27 | /* | |
28 | * next_zone - helper magic for for_each_zone() | |
29 | */ | |
30 | struct zone *next_zone(struct zone *zone) | |
31 | { | |
32 | pg_data_t *pgdat = zone->zone_pgdat; | |
33 | ||
34 | if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) | |
35 | zone++; | |
36 | else { | |
37 | pgdat = next_online_pgdat(pgdat); | |
38 | if (pgdat) | |
39 | zone = pgdat->node_zones; | |
40 | else | |
41 | zone = NULL; | |
42 | } | |
43 | return zone; | |
44 | } | |
95144c78 | 45 | |
19770b32 MG |
46 | static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes) |
47 | { | |
48 | #ifdef CONFIG_NUMA | |
49 | return node_isset(zonelist_node_idx(zref), *nodes); | |
50 | #else | |
51 | return 1; | |
52 | #endif /* CONFIG_NUMA */ | |
53 | } | |
54 | ||
55 | /* Returns the next zone at or below highest_zoneidx in a zonelist */ | |
682a3385 | 56 | struct zoneref *__next_zones_zonelist(struct zoneref *z, |
19770b32 | 57 | enum zone_type highest_zoneidx, |
05891fb0 | 58 | nodemask_t *nodes) |
19770b32 MG |
59 | { |
60 | /* | |
61 | * Find the next suitable zone to use for the allocation. | |
62 | * Only filter based on nodemask if it's set | |
63 | */ | |
e57b9d8c | 64 | if (unlikely(nodes == NULL)) |
19770b32 MG |
65 | while (zonelist_zone_idx(z) > highest_zoneidx) |
66 | z++; | |
67 | else | |
68 | while (zonelist_zone_idx(z) > highest_zoneidx || | |
69 | (z->zone && !zref_in_nodemask(z, nodes))) | |
70 | z++; | |
71 | ||
19770b32 MG |
72 | return z; |
73 | } | |
eb33575c | 74 | |
/*
 * Initialize @lruvec: zero the whole structure, set up its spinlock and
 * the list head of every LRU list, then hand it to the multi-gen LRU.
 */
void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	/* Start from a fully zeroed state before initializing members. */
	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);

	/* Multi-gen LRU bookkeeping (no-op when CONFIG_LRU_GEN is off). */
	lru_gen_init_lruvec(lruvec);
}
4468b8f1 | 94 | |
90572890 PZ |
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically replace the last_cpupid field embedded in page->flags with
 * @cpupid and return the previous value.  A cmpxchg loop is used because
 * other bits of page->flags may be updated concurrently; on failure
 * try_cmpxchg refreshes old_flags with the current value and we retry.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(page->flags);
	do {
		flags = old_flags;
		/* Extract the previous cpupid before overwriting the field. */
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		/* Clear the old cpupid bits and splice in the new value. */
		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));

	return last_cpupid;
}
#endif /* CONFIG_NUMA_BALANCING && !LAST_CPUPID_NOT_IN_PAGE_FLAGS */