// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "internal.h"

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * If we have swap we should also consider NR_inactive_anon and
 * NR_active_anon, so for page cache and anonymous pages respectively:
 *
 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *   + NR_inactive_anon + NR_active_anon
 *
 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *   + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
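 *
 * A worked example with made-up numbers: say the eviction counter read
 * E = 1000 when a page was reclaimed, and R = 1300 when that same page
 * faults back in.  The refault distance R - E is 300, i.e. at least 300
 * other inactive pages were accessed while the page was out of cache.
 * If the relevant workingset described above holds 300 or more pages,
 * the refault is considered actionable and the page is activated.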
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim.  The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

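/*
 * A shadow entry packs, from the lowest bits upward: the workingset
 * flag, the NUMA node id, the memcg id, and finally the (possibly
 * bucketed) eviction timestamp, all folded into a single xarray value.
 * EVICTION_SHIFT is the number of bits spent on everything except the
 * timestamp.
 */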
#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults.  However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

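/*
 * pack_shadow() encodes the eviction context - memcg, node and aged
 * eviction counter - into an xarray value that fits in the emptied
 * page cache slot; unpack_shadow() reverses the transformation on
 * refault.
 */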
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}

#ifdef CONFIG_LRU_GEN

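/*
 * MGLRU eviction: charge the eviction to the folio's generation and
 * tier, and encode the lruvec's oldest generation number (min_seq)
 * together with the folio's LRU reference count into the shadow token.
 */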
static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	int tier = lru_tier_from_refs(refs);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
}

/*
 * Tests if the shadow entry is for a folio that was recently evicted.
 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
 */
static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	int memcg_id;
	unsigned long min_seq;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;

	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

	memcg = mem_cgroup_from_id(memcg_id);
	*lruvec = mem_cgroup_lruvec(memcg, pgdat);

	min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
	return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
}

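/*
 * MGLRU refault: if the shadow entry still refers to the lruvec's
 * current oldest generation, credit the refault to the matching
 * generation and tier and, for sufficiently hot refaults, set
 * PG_workingset and count a workingset restore.
 */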
static void lru_gen_refault(struct folio *folio, void *shadow)
{
	bool recent;
	int hist, tier, refs;
	bool workingset;
	unsigned long token;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	rcu_read_lock();

	recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
	if (lruvec != folio_lruvec(folio))
		goto unlock;

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	if (!recent)
		goto unlock;

	lrugen = &lruvec->lrugen;

	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
	/* see the comment in folio_lru_refs() */
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
	tier = lru_tier_from_refs(refs);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);

	/*
	 * Count the following two cases as stalls:
	 * 1. For pages accessed through page tables, hotter pages pushed out
	 *    hot pages which refaulted immediately.
	 * 2. For pages accessed multiple times through file descriptors,
	 *    they would have been protected by sort_folio().
	 */
	if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
		set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	}
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

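/*
 * Stubs for builds without CONFIG_LRU_GEN, where lru_gen_enabled() is
 * always false and these paths are never taken.
 */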
static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	return false;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
				folio_test_workingset(folio));
}

/**
 * workingset_test_recent - tests if the shadow entry is for a folio that was
 * recently evicted. Also fills in @workingset with the value unpacked from
 * shadow.
 * @shadow: the shadow entry to be tested.
 * @file: whether the corresponding folio is from the file lru.
 * @workingset: where the workingset value unpacked from shadow should
 * be stored.
 * @flush: whether to flush cgroup rstat.
 *
 * Return: true if the shadow is for a recently evicted folio; false otherwise.
 */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	unsigned long refault;
	int memcgid;
	struct pglist_data *pgdat;
	unsigned long eviction;

	rcu_read_lock();

	if (lru_gen_enabled()) {
		bool recent = lru_gen_test_recent(shadow, file,
				&eviction_lruvec, &eviction, workingset);

		rcu_read_unlock();
		return recent;
	}

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
	eviction <<= bucket_order;

	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() &&
	    (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
		rcu_read_unlock();
		return false;
	}

	rcu_read_unlock();

	/*
	 * Flush stats (and potentially sleep) outside the RCU read section.
	 *
	 * Note that workingset_test_recent() itself might be called in an
	 * RCU read section (e.g. in cachestat) - these callers need to skip
	 * flushing stats (via the flush argument).
	 *
	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
	 * still needed here?
	 */
	if (flush)
		mem_cgroup_flush_stats_ratelimited(eviction_memcg);

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old.  But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again.  However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}

	mem_cgroup_put(eviction_memcg);
	return refault_distance <= workingset_size;
}

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	bool workingset;
	long nr;

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event. Make sure the folio is
	 * locked to guarantee folio_memcg() stability throughout.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	if (!workingset_test_recent(shadow, file, &workingset, true))
		return;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = folio_memcg_rcu(folio);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

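/*
 * Hooked up as the xarray node update callback for page cache mappings:
 * called whenever a node's entry counts change, so that nodes holding
 * nothing but shadow entries are kept on the shadow_nodes list_lru and
 * all other nodes are taken off it.
 */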
void workingset_update_node(struct xa_node *node)
{
	struct address_space *mapping;
	struct page *page = virt_to_page(node);

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	mapping = container_of(node->array, struct address_space, i_pages);
	lockdep_assert_held(&mapping->i_pages.xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add_obj(&shadow_nodes, &node->private_list);
			__inc_node_page_state(page, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del_obj(&shadow_nodes, &node->private_list);
			__dec_node_page_state(page, WORKINGSET_NODES);
		}
	}
}

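/*
 * Shrinker ->count_objects callback: report how many shadow-only nodes
 * exceed a cap derived from the size of the (cgroup's) resident pages,
 * so that only the excess is handed to the scan callback below.
 */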
static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		mem_cgroup_flush_stats_ratelimited(sc->memcg);
		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

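/*
 * list_lru walk callback: isolate one node from the shadow LRU, verify
 * it still holds nothing but shadow entries, and delete it from its
 * mapping's xarray.
 */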
static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(lru_lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	struct shrinker *workingset_shadow_shrinker;
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret = -ENOMEM;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						    SHRINKER_MEMCG_AWARE,
						    "mm-shadow");
	if (!workingset_shadow_shrinker)
		goto err;

	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;

	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
	/* ->count reports only fully expendable nodes */
	workingset_shadow_shrinker->seeks = 0;

	shrinker_register(workingset_shadow_shrinker);
	return 0;
err_list_lru:
	shrinker_free(workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);