// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"
#include "mm_slot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ksm.h>

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

typedef u8 rmap_age_t;

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */

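/*
 * For orientation (an illustrative user-space sketch, not part of this
 * file): anonymous memory only becomes a KSM candidate once its range has
 * been advised mergeable and ksmd has been started, roughly:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, len, MADV_MERGEABLE);
 *
 * with ksmd itself enabled via "echo 1 > /sys/kernel/mm/ksm/run".
 */
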
/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 * @age: number of scan iterations since creation
 * @remaining_skips: how many scans to skip
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	rmap_age_t age;
	rmap_age_t remaining_skips;
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
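
/*
 * Since the user addresses tracked here are page aligned, their low bits
 * are free to carry the state above.  Illustration (hypothetical values):
 *
 *	rmap_item->address = 0x7f1200aa3000 | UNSTABLE_FLAG | (seqnr & SEQNR_MASK);
 *	vaddr = rmap_item->address & PAGE_MASK;	// recovers 0x7f1200aa3000
 */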

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
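
/*
 * Note on the sentinel above: the address of migrate_nodes.prev lies inside
 * migrate_nodes itself, so it can never equal the address of a real node;
 * thus stable_node->head == STABLE_NODE_DUP_HEAD unambiguously marks a
 * "dup", while head == &migrate_nodes marks a node parked for migration
 * (see is_stable_node_dup() and remove_node_from_stable_tree() below).
 */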

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* Default number of pages to scan per batch */
#define DEFAULT_PAGES_TO_SCAN 100

/* The number of pages scanned */
static unsigned long ksm_pages_scanned;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

/* Skip pages that couldn't be de-duplicated previously */
/* Default to true at least temporarily, for testing */
static bool ksm_smart_scan = true;

/* The number of zero pages which are placed by KSM */
atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);

/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;

/* Don't scan more than max pages per batch. */
static unsigned long ksm_advisor_max_pages_to_scan = 30000;

/* Min CPU for scanning pages per scan */
#define KSM_ADVISOR_MIN_CPU 10

/* Max CPU for scanning pages per scan */
static unsigned int ksm_advisor_max_cpu = 70;

/* Target scan time in seconds to analyze all KSM candidate pages. */
static unsigned long ksm_advisor_target_scan_time = 200;

/* Exponentially weighted moving average. */
#define EWMA_WEIGHT 30

/**
 * struct advisor_ctx - metadata for KSM advisor
 * @start_scan: start time of the current scan
 * @scan_time: scan time of previous scan
 * @change: change in percent to pages_to_scan parameter
 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
 */
struct advisor_ctx {
	ktime_t start_scan;
	unsigned long scan_time;
	unsigned long change;
	unsigned long long cpu_time;
};
static struct advisor_ctx advisor_ctx;

/* Define the different advisors */
enum ksm_advisor_type {
	KSM_ADVISOR_NONE,
	KSM_ADVISOR_SCAN_TIME,
};
static enum ksm_advisor_type ksm_advisor;

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */

/* At least scan this many pages per batch. */
static unsigned long ksm_advisor_min_pages_to_scan = 500;

static void set_advisor_defaults(void)
{
	if (ksm_advisor == KSM_ADVISOR_NONE) {
		ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
	} else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
		advisor_ctx = (const struct advisor_ctx){ 0 };
		ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
	}
}
#endif /* CONFIG_SYSFS */

static inline void advisor_start_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		advisor_ctx.start_scan = ktime_get();
}

/*
 * Use previous scan time if available, otherwise use current scan time as an
 * approximation for the previous scan time.
 */
static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
					   unsigned long scan_time)
{
	return ctx->scan_time ? ctx->scan_time : scan_time;
}

/* Calculate exponential weighted moving average */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}
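
/*
 * E.g. with EWMA_WEIGHT 30, ewma(100, 200) = (70 * 100 + 30 * 200) / 100
 * = 130: each new sample moves the average 30% of the way towards the new
 * value, damping one-off spikes in the measured scan time.
 */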

/*
 * The scan time advisor is based on the current scan rate and the target
 * scan rate.
 *
 *	new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
 *
 * To avoid perturbations it calculates a change factor of previous changes.
 * A new change factor is calculated for each iteration and it uses an
 * exponentially weighted moving average.  The new pages_to_scan value is
 * multiplied with that change factor:
 *
 *	new_pages_to_scan *= change factor
 *
 * The new_pages_to_scan value is limited by the cpu min and max values.  It
 * calculates the cpu percent for the last scan and calculates the new
 * estimated cpu percent cost for the next scan.  That value is capped by the
 * cpu min and max setting.
 *
 * In addition the new pages_to_scan value is capped by the max and min
 * limits.
 */
static void scan_time_advisor(void)
{
	unsigned int cpu_percent;
	unsigned long cpu_time;
	unsigned long cpu_time_diff;
	unsigned long cpu_time_diff_ms;
	unsigned long pages;
	unsigned long per_page_cost;
	unsigned long factor;
	unsigned long change;
	unsigned long last_scan_time;
	unsigned long scan_time;

	/* Convert scan time to seconds */
	scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
			    MSEC_PER_SEC);
	scan_time = scan_time ? scan_time : 1;

	/* Calculate CPU consumption of ksmd background thread */
	cpu_time = task_sched_runtime(current);
	cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
	cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;

	cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
	cpu_percent = cpu_percent ? cpu_percent : 1;
	last_scan_time = prev_scan_time(&advisor_ctx, scan_time);

	/* Calculate scan time as percentage of target scan time */
	factor = ksm_advisor_target_scan_time * 100 / scan_time;
	factor = factor ? factor : 1;

	/*
	 * Calculate scan time as percentage of last scan time and use
	 * exponentially weighted average to smooth it
	 */
	change = scan_time * 100 / last_scan_time;
	change = change ? change : 1;
	change = ewma(advisor_ctx.change, change);

	/* Calculate new scan rate based on target scan rate. */
	pages = ksm_thread_pages_to_scan * 100 / factor;
	/* Update pages_to_scan by weighted change percentage. */
	pages = pages * change / 100;

	/* Cap new pages_to_scan value */
	per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
	per_page_cost = per_page_cost ? per_page_cost : 1;

	pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
	pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
	pages = min(pages, ksm_advisor_max_pages_to_scan);

	/* Update advisor context */
	advisor_ctx.change = change;
	advisor_ctx.scan_time = scan_time;
	advisor_ctx.cpu_time = cpu_time;

	ksm_thread_pages_to_scan = pages;
	trace_ksm_advisor(scan_time, pages, cpu_percent);
}
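
/*
 * Worked example (hypothetical numbers): with a 200s target_scan_time, a
 * scan that took 400s gives factor = 200 * 100 / 400 = 50, so the raw
 * pages = pages_to_scan * 100 / 50 doubles the batch size to catch up,
 * while a 100s scan gives factor = 200 and halves it.  The EWMA'd change
 * factor and the CPU/page caps then temper that raw adjustment.
 */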

static void advisor_stop_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		scan_time_advisor();
}

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t ptent;
	int ret;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent)) {
		page = vm_normal_page(walk->vma, addr, ptent);
	} else if (!pte_none(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	/* return 1 if the page is a normal ksm page or KSM-placed zero page */
	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
	pte_unmap_unlock(pte, ptl);
	return ret;
}

static const struct mm_walk_ops break_ksm_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static const struct mm_walk_ops break_ksm_lock_vma_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_WRLOCK,
};

/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
	vm_fault_t ret = 0;
	const struct mm_walk_ops *ops = lock_vma ?
				&break_ksm_lock_vma_ops : &break_ksm_ops;

	do {
		int ksm_page;

		cond_resched();
		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
		if (WARN_ON_ONCE(ksm_page < 0))
			return ksm_page;
		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SHARED  | VM_MAYSHARE   | VM_PFNMAP  |
			     VM_IO      | VM_DONTEXPAND | VM_HUGETLB |
			     VM_MIXEDMAP))
		return false;		/* just ignore the advice */

	if (vma_is_dax(vma))
		return false;

#ifdef VM_SAO
	if (vma->vm_flags & VM_SAO)
		return false;
#endif
#ifdef VM_SPARC_ADI
	if (vma->vm_flags & VM_SPARC_ADI)
		return false;
#endif

	return true;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr, false);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (is_zone_device_page(page))
		goto out_putpage;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
out_putpage:
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0.  Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
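
/*
 * Typical use in the tree lookups below: the root for a given ksm page is
 * root_stable_tree + get_kpfn_nid(stable_node->kpfn); with
 * merge_across_nodes == 1 that index is always 0, i.e. one global pair of
 * trees.
 */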

static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						       struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE;	/* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup.  The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
	struct ksm_rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next) {
			ksm_pages_sharing--;
			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
		} else {
			ksm_pages_shared--;
		}

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes.  This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD.  Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	trace_ksm_remove_ksm_page(stable_node->kpfn);
	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

enum ksm_get_folio_flags {
	KSM_GET_FOLIO_NOLOCK,
	KSM_GET_FOLIO_LOCK,
	KSM_GET_FOLIO_TRYLOCK
};

/*
 * ksm_get_folio: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 *
 * Note: it is possible that ksm_get_folio() will return NULL one moment,
 * then page the next, if the page is in between page_ref_freeze() and
 * page_ref_unfreeze(): this shouldn't be a problem anywhere, the page
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
				   enum ksm_get_folio_flags flags)
{
	struct folio *folio;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	folio = pfn_folio(kpfn);
	if (READ_ONCE(folio->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same is in reuse_ksm_page() case; but if page is swapcache
	 * in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!folio_try_get(folio)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * folio->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!folio_test_swapcache(folio))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(folio->mapping) != expected_mapping) {
		folio_put(folio);
		goto stale;
	}

	if (flags == KSM_GET_FOLIO_TRYLOCK) {
		if (!folio_trylock(folio)) {
			folio_put(folio);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == KSM_GET_FOLIO_LOCK)
		folio_lock(folio);

	if (flags != KSM_GET_FOLIO_NOLOCK) {
		if (READ_ONCE(folio->mapping) != expected_mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto stale;
		}
	}
	return folio;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}
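
/*
 * Typical caller pattern (see remove_stable_node() below): take the folio
 * through the keyhole, work under the folio lock, then drop both:
 *
 *	folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
 *	if (!folio)
 *		return;		// stale node was already removed for us
 *	...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */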

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct ksm_stable_node *stable_node;
		struct folio *folio;

		stable_node = rmap_item->head;
		folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
		if (!folio)
			goto out;

		hlist_del(&rmap_item->hlist);
		folio_unlock(folio);
		folio_put(folio);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct ksm_rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end, bool lock_vma)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr, lock_vma);
	}
	return err;
}

static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}

static inline void folio_set_stable_node(struct folio *folio,
					 struct ksm_stable_node *stable_node)
{
	VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
	folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
	struct folio *folio;
	int err;

	folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
	if (!folio) {
		/*
		 * ksm_get_folio did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap().  Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!folio_mapped(folio)) {
		/*
		 * The stable node did not yet appear stale to ksm_get_folio(),
		 * since that allows for an unmapped ksm folio to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This folio might be in an LRU cache waiting to be freed,
		 * or it might be in the swapcache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		folio_set_stable_node(folio, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	folio_unlock(folio);
	folio_put(folio);
	return err;
}

static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
				    struct rb_root *root)
{
	struct ksm_stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}

static int remove_all_stable_nodes(void)
{
	struct ksm_stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
					       struct ksm_stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(ksm_mm_head.slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
	     mm_slot = ksm_scan.mm_slot) {
		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);

		mm = mm_slot->slot.mm;
		mmap_read_lock(mm);

		/*
		 * Exit right away if mm is exiting to avoid lockdep issue in
		 * the maple tree
		 */
		if (ksm_test_exit(mm))
			goto mm_exiting;

		for_each_vma(vmi, vma) {
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end, false);
			if (err)
				goto error;
		}

mm_exiting:
		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->slot.hash);
			list_del(&mm_slot->slot.mm_node);
			spin_unlock(&ksm_mmlist_lock);

			mm_slot_free(mm_slot_cache, mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_local_page(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_local(addr);
	return checksum;
}
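
/*
 * The checksum implements technique 2) of the DOC overview: a page is only
 * trusted into the unstable tree once two successive scans agree on its
 * content.  Conceptually (a simplified sketch of the cmp_and_merge path):
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;	// still volatile, retry next scan
 *		return;
 *	}
 *	// unchanged since the last scan: a candidate for the unstable tree
 */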
1277 | ||
40d707f3 | 1278 | static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, |
31dbd01f IE |
1279 | pte_t *orig_pte) |
1280 | { | |
1281 | struct mm_struct *mm = vma->vm_mm; | |
40d707f3 | 1282 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0); |
31dbd01f IE |
1283 | int swapped; |
1284 | int err = -EFAULT; | |
ac46d4f3 | 1285 | struct mmu_notifier_range range; |
6c287605 | 1286 | bool anon_exclusive; |
c33c7948 | 1287 | pte_t entry; |
31dbd01f | 1288 | |
40d707f3 AS |
1289 | if (WARN_ON_ONCE(folio_test_large(folio))) |
1290 | return err; | |
1291 | ||
1292 | pvmw.address = page_address_in_vma(&folio->page, vma); | |
36eaff33 | 1293 | if (pvmw.address == -EFAULT) |
31dbd01f IE |
1294 | goto out; |
1295 | ||
7d4a8be0 | 1296 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, |
ac46d4f3 JG |
1297 | pvmw.address + PAGE_SIZE); |
1298 | mmu_notifier_invalidate_range_start(&range); | |
6bdb913f | 1299 | |
36eaff33 | 1300 | if (!page_vma_mapped_walk(&pvmw)) |
6bdb913f | 1301 | goto out_mn; |
36eaff33 KS |
1302 | if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) |
1303 | goto out_unlock; | |
31dbd01f | 1304 | |
40d707f3 | 1305 | anon_exclusive = PageAnonExclusive(&folio->page); |
c33c7948 RR |
1306 | entry = ptep_get(pvmw.pte); |
1307 | if (pte_write(entry) || pte_dirty(entry) || | |
6c287605 | 1308 | anon_exclusive || mm_tlb_flush_pending(mm)) { |
40d707f3 AS |
1309 | swapped = folio_test_swapcache(folio); |
1310 | flush_cache_page(vma, pvmw.address, folio_pfn(folio)); | |
31dbd01f | 1311 | /* |
25985edc | 1312 | * Ok, this is tricky: when get_user_pages_fast() runs it doesn't |
31dbd01f | 1313 | * take any lock, therefore the check that we are going to make |
f0953a1b | 1314 | * of the pagecount against the mapcount is racy and |
31dbd01f IE |
1315 | * O_DIRECT can happen right after the check. |
1316 | * So we clear the pte and flush the tlb before the check; |
1317 | * this assures us that no O_DIRECT can happen after the check |
1318 | * or in the middle of the check. |
0f10851e JG |
1319 | * |
1320 | * No need to notify as we are downgrading page table to read | |
1321 | * only not changing it to point to a new page. | |
1322 | * | |
ee65728e | 1323 | * See Documentation/mm/mmu_notifier.rst |
31dbd01f | 1324 | */ |
0f10851e | 1325 | entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); |
31dbd01f IE |
1326 | /* |
1327 | * Check that no O_DIRECT or similar I/O is in progress on the | |
1328 | * page | |
1329 | */ | |
40d707f3 | 1330 | if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) { |
36eaff33 | 1331 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
31dbd01f IE |
1332 | goto out_unlock; |
1333 | } | |
6c287605 | 1334 | |
e3b4b137 DH |
1335 | /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ |
1336 | if (anon_exclusive && | |
40d707f3 | 1337 | folio_try_share_anon_rmap_pte(folio, &folio->page)) { |
6c287605 DH |
1338 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
1339 | goto out_unlock; | |
1340 | } | |
1341 | ||
4e31635c | 1342 | if (pte_dirty(entry)) |
40d707f3 | 1343 | folio_mark_dirty(folio); |
6a56ccbc DH |
1344 | entry = pte_mkclean(entry); |
1345 | ||
1346 | if (pte_write(entry)) | |
1347 | entry = pte_wrprotect(entry); | |
595cd8f2 | 1348 | |
f7842747 | 1349 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
31dbd01f | 1350 | } |
c33c7948 | 1351 | *orig_pte = entry; |
31dbd01f IE |
1352 | err = 0; |
1353 | ||
1354 | out_unlock: | |
36eaff33 | 1355 | page_vma_mapped_walk_done(&pvmw); |
6bdb913f | 1356 | out_mn: |
ac46d4f3 | 1357 | mmu_notifier_invalidate_range_end(&range); |
31dbd01f IE |
1358 | out: |
1359 | return err; | |
1360 | } | |
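
A worked instance of the race check above: a page mapped by a single pte (folio_mapcount() == 1), not in the swap cache (swapped == 0), plus the reference the scanner itself took via follow_page(), should show folio_ref_count() == 1 + 1 + 0 == 2, absent other transient references such as those from the per-cpu LRU caches mentioned later. A concurrent O_DIRECT read that has pinned the page raises the count to 3, the comparison fails, and the original pte is restored so the I/O can complete against the still-unshared page.
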
1361 | ||
1362 | /** | |
1363 | * replace_page - replace page in vma by new ksm page | |
8dd3557a HD |
1364 | * @vma: vma that holds the pte pointing to page |
1365 | * @page: the page we are replacing by kpage | |
1366 | * @kpage: the ksm page we replace page by | |
31dbd01f IE |
1367 | * @orig_pte: the original value of the pte |
1368 | * | |
1369 | * Returns 0 on success, -EFAULT on failure. | |
1370 | */ | |
8dd3557a HD |
1371 | static int replace_page(struct vm_area_struct *vma, struct page *page, |
1372 | struct page *kpage, pte_t orig_pte) | |
31dbd01f | 1373 | { |
97729534 | 1374 | struct folio *kfolio = page_folio(kpage); |
31dbd01f | 1375 | struct mm_struct *mm = vma->vm_mm; |
b4e6f66e | 1376 | struct folio *folio; |
31dbd01f | 1377 | pmd_t *pmd; |
50722804 | 1378 | pmd_t pmde; |
31dbd01f | 1379 | pte_t *ptep; |
e86c59b1 | 1380 | pte_t newpte; |
31dbd01f IE |
1381 | spinlock_t *ptl; |
1382 | unsigned long addr; | |
31dbd01f | 1383 | int err = -EFAULT; |
ac46d4f3 | 1384 | struct mmu_notifier_range range; |
31dbd01f | 1385 | |
8dd3557a | 1386 | addr = page_address_in_vma(page, vma); |
31dbd01f IE |
1387 | if (addr == -EFAULT) |
1388 | goto out; | |
1389 | ||
6219049a BL |
1390 | pmd = mm_find_pmd(mm, addr); |
1391 | if (!pmd) | |
31dbd01f | 1392 | goto out; |
50722804 ZK |
1393 | /* |
1394 | * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() | |
1395 | * without holding anon_vma lock for write. So when looking for a | |
1396 | * genuine pmde (in which to find pte), test present and !THP together. | |
1397 | */ | |
26e1a0c3 | 1398 | pmde = pmdp_get_lockless(pmd); |
50722804 ZK |
1399 | if (!pmd_present(pmde) || pmd_trans_huge(pmde)) |
1400 | goto out; | |
31dbd01f | 1401 | |
7d4a8be0 | 1402 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, |
6f4f13e8 | 1403 | addr + PAGE_SIZE); |
ac46d4f3 | 1404 | mmu_notifier_invalidate_range_start(&range); |
6bdb913f | 1405 | |
31dbd01f | 1406 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
04dee9e8 HD |
1407 | if (!ptep) |
1408 | goto out_mn; | |
c33c7948 | 1409 | if (!pte_same(ptep_get(ptep), orig_pte)) { |
31dbd01f | 1410 | pte_unmap_unlock(ptep, ptl); |
6bdb913f | 1411 | goto out_mn; |
31dbd01f | 1412 | } |
6c287605 | 1413 | VM_BUG_ON_PAGE(PageAnonExclusive(page), page); |
97729534 DH |
1414 | VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage), |
1415 | kfolio); | |
31dbd01f | 1416 | |
e86c59b1 CI |
1417 | /* |
1418 | * No need to check ksm_use_zero_pages here: we can only have a | |
457aef94 | 1419 | * zero_page here if ksm_use_zero_pages was enabled already. |
e86c59b1 CI |
1420 | */ |
1421 | if (!is_zero_pfn(page_to_pfn(kpage))) { | |
97729534 DH |
1422 | folio_get(kfolio); |
1423 | folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE); | |
e86c59b1 CI |
1424 | newpte = mk_pte(kpage, vma->vm_page_prot); |
1425 | } else { | |
79271476 | 1426 | /* |
1427 | * Use pte_mkdirty to mark the zero page mapped by KSM, and then | |
1428 | * we can easily track all KSM-placed zero pages by checking if | |
1429 | * the dirty bit in zero page's PTE is set. | |
1430 | */ | |
1431 | newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); | |
c2dc78b8 | 1432 | ksm_map_zero_page(mm); |
a38c015f CI |
1433 | /* |
1434 | * We're replacing an anonymous page with a zero page, which is | |
1435 | * not anonymous. We need to do proper accounting otherwise we | |
1436 | * will get wrong values in /proc, and a BUG message in dmesg | |
1437 | * when tearing down the mm. | |
1438 | */ | |
1439 | dec_mm_counter(mm, MM_ANONPAGES); | |
e86c59b1 | 1440 | } |
31dbd01f | 1441 | |
c33c7948 | 1442 | flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); |
0f10851e JG |
1443 | /* |
1444 | * No need to notify as we are replacing a read only page with another | |
1445 | * read only page with the same content. | |
1446 | * | |
ee65728e | 1447 | * See Documentation/mm/mmu_notifier.rst |
0f10851e JG |
1448 | */ |
1449 | ptep_clear_flush(vma, addr, ptep); | |
f7842747 | 1450 | set_pte_at(mm, addr, ptep, newpte); |
31dbd01f | 1451 | |
b4e6f66e | 1452 | folio = page_folio(page); |
18e8612e | 1453 | folio_remove_rmap_pte(folio, page, vma); |
b4e6f66e MWO |
1454 | if (!folio_mapped(folio)) |
1455 | folio_free_swap(folio); | |
1456 | folio_put(folio); | |
31dbd01f IE |
1457 | |
1458 | pte_unmap_unlock(ptep, ptl); | |
1459 | err = 0; | |
6bdb913f | 1460 | out_mn: |
ac46d4f3 | 1461 | mmu_notifier_invalidate_range_end(&range); |
31dbd01f IE |
1462 | out: |
1463 | return err; | |
1464 | } | |
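
The pte_mkdirty() applied to the special zero-page pte above is what later lets KSM recognize its own zero-page placements, complementing the per-mm accounting done by ksm_map_zero_page(). A sketch of that predicate; the helper name here is invented, though the kernel defines an equivalent test:

	static inline bool sketch_is_ksm_zero_pte(pte_t pte)
	{
		/* only KSM maps the shared zero page with a dirty pte */
		return is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte);
	}
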
1465 | ||
1466 | /* | |
1467 | * try_to_merge_one_page - take two pages and merge them into one | |
8dd3557a HD |
1468 | * @vma: the vma that holds the pte pointing to page |
1469 | * @page: the PageAnon page that we want to replace with kpage | |
80e14822 HD |
1470 | * @kpage: the PageKsm page that we want to map instead of page, |
1471 | * or NULL the first time when we want to use page as kpage. | |
31dbd01f IE |
1472 | * |
1473 | * This function returns 0 if the pages were merged, -EFAULT otherwise. | |
1474 | */ | |
1475 | static int try_to_merge_one_page(struct vm_area_struct *vma, | |
8dd3557a | 1476 | struct page *page, struct page *kpage) |
31dbd01f IE |
1477 | { |
1478 | pte_t orig_pte = __pte(0); | |
1479 | int err = -EFAULT; | |
1480 | ||
db114b83 HD |
1481 | if (page == kpage) /* ksm page forked */ |
1482 | return 0; | |
1483 | ||
8dd3557a | 1484 | if (!PageAnon(page)) |
31dbd01f IE |
1485 | goto out; |
1486 | ||
31dbd01f IE |
1487 | /* |
1488 | * We need the page lock to read a stable PageSwapCache in | |
1489 | * write_protect_page(). We use trylock_page() instead of | |
1490 | * lock_page() because we don't want to wait here - we | |
1491 | * prefer to continue scanning and merging different pages, | |
1492 | * then come back to this page when it is unlocked. | |
1493 | */ | |
8dd3557a | 1494 | if (!trylock_page(page)) |
31e855ea | 1495 | goto out; |
f765f540 KS |
1496 | |
1497 | if (PageTransCompound(page)) { | |
a7306c34 | 1498 | if (split_huge_page(page)) |
f765f540 KS |
1499 | goto out_unlock; |
1500 | } | |
1501 | ||
31dbd01f IE |
1502 | /* |
1503 | * If this anonymous page is mapped only here, its pte may need | |
1504 | * to be write-protected. If it's mapped elsewhere, all of its | |
1505 | * ptes are necessarily already write-protected. But in either | |
1506 | * case, we need to lock and check that the page count is not raised. |
1507 | */ | |
40d707f3 | 1508 | if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) { |
80e14822 HD |
1509 | if (!kpage) { |
1510 | /* | |
1511 | * While we hold page lock, upgrade page from | |
1512 | * PageAnon+anon_vma to PageKsm+NULL stable_node: | |
1513 | * stable_tree_insert() will update stable_node. | |
1514 | */ | |
452e862f | 1515 | folio_set_stable_node(page_folio(page), NULL); |
80e14822 | 1516 | mark_page_accessed(page); |
337ed7eb MK |
1517 | /* |
1518 | * Page reclaim just frees a clean page with no dirty | |
1519 | * ptes: make sure that the ksm page would be swapped. | |
1520 | */ | |
1521 | if (!PageDirty(page)) | |
1522 | SetPageDirty(page); | |
80e14822 HD |
1523 | err = 0; |
1524 | } else if (pages_identical(page, kpage)) | |
1525 | err = replace_page(vma, page, kpage, orig_pte); | |
1526 | } | |
31dbd01f | 1527 | |
f765f540 | 1528 | out_unlock: |
8dd3557a | 1529 | unlock_page(page); |
31dbd01f IE |
1530 | out: |
1531 | return err; | |
1532 | } | |
1533 | ||
81464e30 HD |
1534 | /* |
1535 | * try_to_merge_with_ksm_page - like try_to_merge_two_pages, | |
1536 | * but no new kernel page is allocated: kpage must already be a ksm page. | |
8dd3557a HD |
1537 | * |
1538 | * This function returns 0 if the pages were merged, -EFAULT otherwise. | |
81464e30 | 1539 | */ |
21fbd591 | 1540 | static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, |
8dd3557a | 1541 | struct page *page, struct page *kpage) |
81464e30 | 1542 | { |
8dd3557a | 1543 | struct mm_struct *mm = rmap_item->mm; |
81464e30 HD |
1544 | struct vm_area_struct *vma; |
1545 | int err = -EFAULT; | |
1546 | ||
d8ed45c5 | 1547 | mmap_read_lock(mm); |
85c6e8dd AA |
1548 | vma = find_mergeable_vma(mm, rmap_item->address); |
1549 | if (!vma) | |
81464e30 HD |
1550 | goto out; |
1551 | ||
8dd3557a | 1552 | err = try_to_merge_one_page(vma, page, kpage); |
db114b83 HD |
1553 | if (err) |
1554 | goto out; | |
1555 | ||
bc56620b HD |
1556 | /* Unstable nid is in union with stable anon_vma: remove first */ |
1557 | remove_rmap_item_from_tree(rmap_item); | |
1558 | ||
c1e8d7c6 | 1559 | /* Must get reference to anon_vma while still holding mmap_lock */ |
9e60109f PZ |
1560 | rmap_item->anon_vma = vma->anon_vma; |
1561 | get_anon_vma(vma->anon_vma); | |
81464e30 | 1562 | out: |
d8ed45c5 | 1563 | mmap_read_unlock(mm); |
739100c8 SR |
1564 | trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), |
1565 | rmap_item, mm, err); | |
81464e30 HD |
1566 | return err; |
1567 | } | |
1568 | ||
31dbd01f IE |
1569 | /* |
1570 | * try_to_merge_two_pages - take two identical pages and prepare them | |
1571 | * to be merged into one page. | |
1572 | * | |
8dd3557a HD |
1573 | * This function returns the kpage if we successfully merged two identical |
1574 | * pages into one ksm page, NULL otherwise. | |
31dbd01f | 1575 | * |
80e14822 | 1576 | * Note that this function upgrades page to ksm page: if one of the pages |
31dbd01f IE |
1577 | * is already a ksm page, try_to_merge_with_ksm_page should be used. |
1578 | */ | |
21fbd591 | 1579 | static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, |
8dd3557a | 1580 | struct page *page, |
21fbd591 | 1581 | struct ksm_rmap_item *tree_rmap_item, |
8dd3557a | 1582 | struct page *tree_page) |
31dbd01f | 1583 | { |
80e14822 | 1584 | int err; |
31dbd01f | 1585 | |
80e14822 | 1586 | err = try_to_merge_with_ksm_page(rmap_item, page, NULL); |
31dbd01f | 1587 | if (!err) { |
8dd3557a | 1588 | err = try_to_merge_with_ksm_page(tree_rmap_item, |
80e14822 | 1589 | tree_page, page); |
31dbd01f | 1590 | /* |
81464e30 HD |
1591 | * If that fails, we have a ksm page with only one pte |
1592 | * pointing to it: so break it. | |
31dbd01f | 1593 | */ |
4035c07a | 1594 | if (err) |
8dd3557a | 1595 | break_cow(rmap_item); |
31dbd01f | 1596 | } |
80e14822 | 1597 | return err ? NULL : page; |
31dbd01f IE |
1598 | } |
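
Note the asymmetry: either page could in principle have become the shared copy, but this function always promotes the currently scanned page. The first call, with a NULL kpage, turns page itself into a (not yet tree-inserted) KSM page; the second call redirects tree_page's mapping onto it, which is why the value returned on success is page.
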
1599 | ||
2c653d0e | 1600 | static __always_inline |
21fbd591 | 1601 | bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) |
2c653d0e AA |
1602 | { |
1603 | VM_BUG_ON(stable_node->rmap_hlist_len < 0); | |
1604 | /* | |
1605 | * Check that at least one mapping still exists, otherwise | |
1606 | * there's not much point in merging and sharing with this |
1607 | * stable_node, as the underlying tree_page of the other | |
1608 | * sharer is going to be freed soon. | |
1609 | */ | |
1610 | return stable_node->rmap_hlist_len && | |
1611 | stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; | |
1612 | } | |
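
Worked example, assuming the default ksm_max_page_sharing of 256: a dup whose rmap_hlist_len is 255 still qualifies at offset 0 (255 + 0 < 256), but fails the offset-1 test used below when deciding whether a dup has headroom for one more future merge. A dup with rmap_hlist_len == 0 never qualifies, since its underlying page is about to be freed.
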
1613 | ||
1614 | static __always_inline | |
21fbd591 | 1615 | bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) |
2c653d0e AA |
1616 | { |
1617 | return __is_page_sharing_candidate(stable_node, 0); | |
1618 | } | |
1619 | ||
79899cce AS |
1620 | static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup, |
1621 | struct ksm_stable_node **_stable_node, | |
1622 | struct rb_root *root, | |
1623 | bool prune_stale_stable_nodes) | |
2c653d0e | 1624 | { |
21fbd591 | 1625 | struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; |
2c653d0e | 1626 | struct hlist_node *hlist_safe; |
6f528de2 | 1627 | struct folio *folio, *tree_folio = NULL; |
2c653d0e AA |
1628 | int nr = 0; |
1629 | int found_rmap_hlist_len; | |
1630 | ||
1631 | if (!prune_stale_stable_nodes || | |
1632 | time_before(jiffies, stable_node->chain_prune_time + | |
1633 | msecs_to_jiffies( | |
1634 | ksm_stable_node_chains_prune_millisecs))) | |
1635 | prune_stale_stable_nodes = false; | |
1636 | else | |
1637 | stable_node->chain_prune_time = jiffies; | |
1638 | ||
1639 | hlist_for_each_entry_safe(dup, hlist_safe, | |
1640 | &stable_node->hlist, hlist_dup) { | |
1641 | cond_resched(); | |
1642 | /* | |
1643 | * We must walk all stable_node_dup to prune the stale | |
1644 | * stable nodes during lookup. | |
1645 | * | |
6f528de2 | 1646 | * ksm_get_folio can drop the nodes from the |
2c653d0e AA |
1647 | * stable_node->hlist if they point to freed pages |
1648 | * (that's why we do a _safe walk). The "dup" | |
1649 | * stable_node parameter itself will be freed from | |
1650 | * under us if it returns NULL. | |
1651 | */ | |
85b67b01 | 1652 | folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK); |
6f528de2 | 1653 | if (!folio) |
2c653d0e AA |
1654 | continue; |
1655 | nr += 1; | |
1656 | if (is_page_sharing_candidate(dup)) { | |
1657 | if (!found || | |
1658 | dup->rmap_hlist_len > found_rmap_hlist_len) { | |
1659 | if (found) | |
6f528de2 | 1660 | folio_put(tree_folio); |
2c653d0e AA |
1661 | found = dup; |
1662 | found_rmap_hlist_len = found->rmap_hlist_len; | |
6f528de2 | 1663 | tree_folio = folio; |
2c653d0e | 1664 | |
8dc5ffcd | 1665 | /* skip folio_put for the found dup */ |
2c653d0e AA |
1666 | if (!prune_stale_stable_nodes) |
1667 | break; | |
2c653d0e AA |
1668 | continue; |
1669 | } | |
1670 | } | |
6f528de2 | 1671 | folio_put(folio); |
2c653d0e AA |
1672 | } |
1673 | ||
80b18dfa AA |
1674 | if (found) { |
1675 | /* | |
1676 | * nr is counting all dups in the chain only if | |
1677 | * prune_stale_stable_nodes is true, otherwise we may | |
1678 | * break the loop at nr == 1 even if there are | |
1679 | * multiple entries. | |
1680 | */ | |
1681 | if (prune_stale_stable_nodes && nr == 1) { | |
2c653d0e AA |
1682 | /* |
1683 | * If there's not just one entry it would | |
1684 | * corrupt memory, better BUG_ON. In KSM | |
1685 | * context with no lock held it's not even | |
1686 | * fatal. | |
1687 | */ | |
1688 | BUG_ON(stable_node->hlist.first->next); | |
1689 | ||
1690 | /* | |
1691 | * There's just one entry and it is below the | |
1692 | * deduplication limit so drop the chain. | |
1693 | */ | |
1694 | rb_replace_node(&stable_node->node, &found->node, | |
1695 | root); | |
1696 | free_stable_node(stable_node); | |
1697 | ksm_stable_node_chains--; | |
1698 | ksm_stable_node_dups--; | |
b4fecc67 | 1699 | /* |
0ba1d0f7 AA |
1700 | * NOTE: the caller depends on the stable_node |
1701 | * to be equal to stable_node_dup if the chain | |
1702 | * was collapsed. | |
b4fecc67 | 1703 | */ |
0ba1d0f7 AA |
1704 | *_stable_node = found; |
1705 | /* | |
f0953a1b | 1706 | * Just for robustness, as stable_node is |
0ba1d0f7 AA |
1707 | * otherwise left as a stable pointer, the |
1708 | * compiler shall optimize it away at build | |
1709 | * time. | |
1710 | */ | |
1711 | stable_node = NULL; | |
80b18dfa AA |
1712 | } else if (stable_node->hlist.first != &found->hlist_dup && |
1713 | __is_page_sharing_candidate(found, 1)) { | |
2c653d0e | 1714 | /* |
80b18dfa AA |
1715 | * If the found stable_node dup can accept one |
1716 | * more future merge (in addition to the one | |
1717 | * that is underway) and is not at the head of | |
1718 | * the chain, put it there so next search will | |
1719 | * be quicker in the !prune_stale_stable_nodes | |
1720 | * case. | |
1721 | * | |
1722 | * NOTE: it would be inaccurate to use nr > 1 | |
1723 | * instead of checking the hlist.first pointer | |
1724 | * directly, because in the | |
1725 | * prune_stale_stable_nodes case "nr" isn't | |
1726 | * the position of the found dup in the chain, | |
1727 | * but the total number of dups in the chain. | |
2c653d0e AA |
1728 | */ |
1729 | hlist_del(&found->hlist_dup); | |
1730 | hlist_add_head(&found->hlist_dup, | |
1731 | &stable_node->hlist); | |
1732 | } | |
1733 | } | |
1734 | ||
8dc5ffcd | 1735 | *_stable_node_dup = found; |
79899cce | 1736 | return tree_folio; |
2c653d0e AA |
1737 | } |
1738 | ||
21fbd591 | 1739 | static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node, |
2c653d0e AA |
1740 | struct rb_root *root) |
1741 | { | |
1742 | if (!is_stable_node_chain(stable_node)) | |
1743 | return stable_node; | |
1744 | if (hlist_empty(&stable_node->hlist)) { | |
1745 | free_stable_node_chain(stable_node, root); | |
1746 | return NULL; | |
1747 | } | |
1748 | return hlist_entry(stable_node->hlist.first, | |
1749 | typeof(*stable_node), hlist_dup); | |
1750 | } | |
1751 | ||
8dc5ffcd | 1752 | /* |
79899cce | 1753 | * Like for ksm_get_folio, this function can free the *_stable_node and |
8dc5ffcd AA |
1754 | * *_stable_node_dup if the returned tree_folio is NULL. |
1755 | * | |
1756 | * It can also free and overwrite *_stable_node with the found | |
1757 | * stable_node_dup if the chain is collapsed (in which case | |
1758 | * *_stable_node will be equal to *_stable_node_dup like if the chain | |
1759 | * never existed). It's up to the caller to verify tree_folio is not |
1760 | * NULL before dereferencing *_stable_node or *_stable_node_dup. | |
1761 | * | |
1762 | * *_stable_node_dup is really a second output parameter of this | |
1763 | * function and will be overwritten in all cases, the caller doesn't | |
1764 | * need to initialize it. | |
1765 | */ | |
79899cce AS |
1766 | static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, |
1767 | struct ksm_stable_node **_stable_node, | |
1768 | struct rb_root *root, | |
1769 | bool prune_stale_stable_nodes) | |
2c653d0e | 1770 | { |
21fbd591 | 1771 | struct ksm_stable_node *stable_node = *_stable_node; |
2c653d0e AA |
1772 | if (!is_stable_node_chain(stable_node)) { |
1773 | if (is_page_sharing_candidate(stable_node)) { | |
8dc5ffcd | 1774 | *_stable_node_dup = stable_node; |
85b67b01 | 1775 | return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK); |
2c653d0e | 1776 | } |
8dc5ffcd AA |
1777 | /* |
1778 | * _stable_node_dup set to NULL means the stable_node | |
1779 | * reached the ksm_max_page_sharing limit. | |
1780 | */ | |
1781 | *_stable_node_dup = NULL; | |
2c653d0e AA |
1782 | return NULL; |
1783 | } | |
8dc5ffcd | 1784 | return stable_node_dup(_stable_node_dup, _stable_node, root, |
2c653d0e AA |
1785 | prune_stale_stable_nodes); |
1786 | } | |
1787 | ||
79899cce AS |
1788 | static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d, |
1789 | struct ksm_stable_node **s_n, | |
1790 | struct rb_root *root) | |
2c653d0e | 1791 | { |
8dc5ffcd | 1792 | return __stable_node_chain(s_n_d, s_n, root, true); |
2c653d0e AA |
1793 | } |
1794 | ||
79899cce AS |
1795 | static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d, |
1796 | struct ksm_stable_node *s_n, | |
1797 | struct rb_root *root) | |
2c653d0e | 1798 | { |
21fbd591 | 1799 | struct ksm_stable_node *old_stable_node = s_n; |
79899cce | 1800 | struct folio *tree_folio; |
8dc5ffcd | 1801 | |
79899cce | 1802 | tree_folio = __stable_node_chain(s_n_d, &s_n, root, false); |
8dc5ffcd AA |
1803 | /* not pruning dups so s_n cannot have changed */ |
1804 | VM_BUG_ON(s_n != old_stable_node); | |
79899cce | 1805 | return tree_folio; |
2c653d0e AA |
1806 | } |
1807 | ||
31dbd01f | 1808 | /* |
8dd3557a | 1809 | * stable_tree_search - search for page inside the stable tree |
31dbd01f IE |
1810 | * |
1811 | * This function checks if there is a page inside the stable tree | |
1812 | * with identical content to the page that we are scanning right now. | |
1813 | * | |
7b6ba2c7 | 1814 | * This function returns the stable tree node of identical content if found, |
31dbd01f IE |
1815 | * NULL otherwise. |
1816 | */ | |
62b61f61 | 1817 | static struct page *stable_tree_search(struct page *page) |
31dbd01f | 1818 | { |
90bd6fd3 | 1819 | int nid; |
ef53d16c | 1820 | struct rb_root *root; |
4146d2d6 HD |
1821 | struct rb_node **new; |
1822 | struct rb_node *parent; | |
21fbd591 QZ |
1823 | struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; |
1824 | struct ksm_stable_node *page_node; | |
79899cce | 1825 | struct folio *folio; |
31dbd01f | 1826 | |
79899cce AS |
1827 | folio = page_folio(page); |
1828 | page_node = folio_stable_node(folio); | |
4146d2d6 HD |
1829 | if (page_node && page_node->head != &migrate_nodes) { |
1830 | /* ksm page forked */ | |
79899cce AS |
1831 | folio_get(folio); |
1832 | return &folio->page; | |
08beca44 HD |
1833 | } |
1834 | ||
79899cce | 1835 | nid = get_kpfn_nid(folio_pfn(folio)); |
ef53d16c | 1836 | root = root_stable_tree + nid; |
4146d2d6 | 1837 | again: |
ef53d16c | 1838 | new = &root->rb_node; |
4146d2d6 | 1839 | parent = NULL; |
90bd6fd3 | 1840 | |
4146d2d6 | 1841 | while (*new) { |
79899cce | 1842 | struct folio *tree_folio; |
31dbd01f IE |
1843 | int ret; |
1844 | ||
08beca44 | 1845 | cond_resched(); |
21fbd591 | 1846 | stable_node = rb_entry(*new, struct ksm_stable_node, node); |
2c653d0e | 1847 | stable_node_any = NULL; |
79899cce | 1848 | tree_folio = chain_prune(&stable_node_dup, &stable_node, root); |
b4fecc67 AA |
1849 | /* |
1850 | * NOTE: stable_node may have been freed by | |
1851 | * chain_prune() if the returned stable_node_dup is | |
1852 | * not NULL. stable_node_dup may have been inserted in | |
1853 | * the rbtree instead as a regular stable_node (in | |
1854 | * order to collapse the stable_node chain if a single | |
0ba1d0f7 | 1855 | * stable_node dup was found in it). In such case the |
3413b2c8 | 1856 | * stable_node is overwritten by the callee to point |
0ba1d0f7 AA |
1857 | * to the stable_node_dup that was collapsed in the |
1858 | * stable rbtree and stable_node will be equal to | |
1859 | * stable_node_dup like if the chain never existed. | |
b4fecc67 | 1860 | */ |
2c653d0e AA |
1861 | if (!stable_node_dup) { |
1862 | /* | |
1863 | * Either all stable_node dups were full in | |
1864 | * this stable_node chain, or this chain was | |
1865 | * empty and should be rb_erased. | |
1866 | */ | |
1867 | stable_node_any = stable_node_dup_any(stable_node, | |
1868 | root); | |
1869 | if (!stable_node_any) { | |
1870 | /* rb_erase just run */ | |
1871 | goto again; | |
1872 | } | |
1873 | /* | |
1874 | * Take any of the stable_node dups page of | |
1875 | * this stable_node chain to let the tree walk | |
1876 | * continue. All KSM pages belonging to the | |
1877 | * stable_node dups in a stable_node chain | |
1878 | * have the same content and they're | |
457aef94 | 1879 | * write protected at all times. Any will work |
2c653d0e AA |
1880 | * fine to continue the walk. |
1881 | */ | |
79899cce | 1882 | tree_folio = ksm_get_folio(stable_node_any, |
85b67b01 | 1883 | KSM_GET_FOLIO_NOLOCK); |
2c653d0e AA |
1884 | } |
1885 | VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); | |
79899cce | 1886 | if (!tree_folio) { |
f2e5ff85 AA |
1887 | /* |
1888 | * If we walked over a stale stable_node, | |
79899cce | 1889 | * ksm_get_folio() will call rb_erase() and it |
f2e5ff85 AA |
1890 | * may rebalance the tree from under us. So |
1891 | * restart the search from scratch. Returning | |
1892 | * NULL would be safe too, but we'd generate | |
1893 | * false negative insertions just because some | |
1894 | * stable_node was stale. | |
1895 | */ | |
1896 | goto again; | |
1897 | } | |
31dbd01f | 1898 | |
79899cce AS |
1899 | ret = memcmp_pages(page, &tree_folio->page); |
1900 | folio_put(tree_folio); | |
31dbd01f | 1901 | |
4146d2d6 | 1902 | parent = *new; |
c8d6553b | 1903 | if (ret < 0) |
4146d2d6 | 1904 | new = &parent->rb_left; |
c8d6553b | 1905 | else if (ret > 0) |
4146d2d6 | 1906 | new = &parent->rb_right; |
c8d6553b | 1907 | else { |
2c653d0e AA |
1908 | if (page_node) { |
1909 | VM_BUG_ON(page_node->head != &migrate_nodes); | |
1910 | /* | |
2aa33912 DH |
1911 | * If the mapcount of our migrated KSM folio is |
1912 | * at most 1, we can merge it with another | |
1913 | * KSM folio where we know that we have space | |
1914 | * for one more mapping without exceeding the | |
1915 | * ksm_max_page_sharing limit: see | |
1916 | * chain_prune(). This way, we can avoid adding | |
1917 | * this stable node to the chain. | |
2c653d0e | 1918 | */ |
2aa33912 | 1919 | if (folio_mapcount(folio) > 1) |
2c653d0e AA |
1920 | goto chain_append; |
1921 | } | |
1922 | ||
1923 | if (!stable_node_dup) { | |
1924 | /* | |
1925 | * If the stable_node is a chain and | |
1926 | * we got a payload match in memcmp | |
1927 | * but we cannot merge the scanned | |
1928 | * page in any of the existing | |
1929 | * stable_node dups because they're | |
1930 | * all full, we need to wait the | |
1931 | * scanned page to find itself a match | |
1932 | * in the unstable tree to create a | |
1933 | * brand new KSM page to add later to | |
1934 | * the dups of this stable_node. | |
1935 | */ | |
1936 | return NULL; | |
1937 | } | |
1938 | ||
c8d6553b HD |
1939 | /* |
1940 | * Lock and unlock the stable_node's page (which | |
1941 | * might already have been migrated) so that page | |
1942 | * migration is sure to notice its raised count. | |
1943 | * It would be more elegant to return stable_node | |
1944 | * than kpage, but that involves more changes. | |
1945 | */ | |
79899cce | 1946 | tree_folio = ksm_get_folio(stable_node_dup, |
85b67b01 | 1947 | KSM_GET_FOLIO_TRYLOCK); |
2cee57d1 | 1948 | |
79899cce | 1949 | if (PTR_ERR(tree_folio) == -EBUSY) |
2cee57d1 YS |
1950 | return ERR_PTR(-EBUSY); |
1951 | ||
79899cce | 1952 | if (unlikely(!tree_folio)) |
2c653d0e AA |
1953 | /* |
1954 | * The tree may have been rebalanced, | |
1955 | * so re-evaluate parent and new. | |
1956 | */ | |
4146d2d6 | 1957 | goto again; |
79899cce | 1958 | folio_unlock(tree_folio); |
2c653d0e AA |
1959 | |
1960 | if (get_kpfn_nid(stable_node_dup->kpfn) != | |
1961 | NUMA(stable_node_dup->nid)) { | |
79899cce | 1962 | folio_put(tree_folio); |
2c653d0e AA |
1963 | goto replace; |
1964 | } | |
79899cce | 1965 | return &tree_folio->page; |
c8d6553b | 1966 | } |
31dbd01f IE |
1967 | } |
1968 | ||
4146d2d6 HD |
1969 | if (!page_node) |
1970 | return NULL; | |
1971 | ||
1972 | list_del(&page_node->list); | |
1973 | DO_NUMA(page_node->nid = nid); | |
1974 | rb_link_node(&page_node->node, parent, new); | |
ef53d16c | 1975 | rb_insert_color(&page_node->node, root); |
2c653d0e AA |
1976 | out: |
1977 | if (is_page_sharing_candidate(page_node)) { | |
79899cce AS |
1978 | folio_get(folio); |
1979 | return &folio->page; | |
2c653d0e AA |
1980 | } else |
1981 | return NULL; | |
4146d2d6 HD |
1982 | |
1983 | replace: | |
b4fecc67 AA |
1984 | /* |
1985 | * If stable_node was a chain and chain_prune collapsed it, | |
0ba1d0f7 AA |
1986 | * stable_node has been updated to be the new regular |
1987 | * stable_node. A collapse of the chain is indistinguishable | |
1988 | * from the case there was no chain in the stable | |
1989 | * rbtree. Otherwise stable_node is the chain and | |
1990 | * stable_node_dup is the dup to replace. | |
b4fecc67 | 1991 | */ |
0ba1d0f7 | 1992 | if (stable_node_dup == stable_node) { |
b4fecc67 AA |
1993 | VM_BUG_ON(is_stable_node_chain(stable_node_dup)); |
1994 | VM_BUG_ON(is_stable_node_dup(stable_node_dup)); | |
2c653d0e AA |
1995 | /* there is no chain */ |
1996 | if (page_node) { | |
1997 | VM_BUG_ON(page_node->head != &migrate_nodes); | |
1998 | list_del(&page_node->list); | |
1999 | DO_NUMA(page_node->nid = nid); | |
b4fecc67 AA |
2000 | rb_replace_node(&stable_node_dup->node, |
2001 | &page_node->node, | |
2c653d0e AA |
2002 | root); |
2003 | if (is_page_sharing_candidate(page_node)) | |
79899cce | 2004 | folio_get(folio); |
2c653d0e | 2005 | else |
79899cce | 2006 | folio = NULL; |
2c653d0e | 2007 | } else { |
b4fecc67 | 2008 | rb_erase(&stable_node_dup->node, root); |
79899cce | 2009 | folio = NULL; |
2c653d0e | 2010 | } |
4146d2d6 | 2011 | } else { |
2c653d0e AA |
2012 | VM_BUG_ON(!is_stable_node_chain(stable_node)); |
2013 | __stable_node_dup_del(stable_node_dup); | |
2014 | if (page_node) { | |
2015 | VM_BUG_ON(page_node->head != &migrate_nodes); | |
2016 | list_del(&page_node->list); | |
2017 | DO_NUMA(page_node->nid = nid); | |
2018 | stable_node_chain_add_dup(page_node, stable_node); | |
2019 | if (is_page_sharing_candidate(page_node)) | |
79899cce | 2020 | folio_get(folio); |
2c653d0e | 2021 | else |
79899cce | 2022 | folio = NULL; |
2c653d0e | 2023 | } else { |
79899cce | 2024 | folio = NULL; |
2c653d0e | 2025 | } |
4146d2d6 | 2026 | } |
2c653d0e AA |
2027 | stable_node_dup->head = &migrate_nodes; |
2028 | list_add(&stable_node_dup->list, stable_node_dup->head); | |
79899cce | 2029 | return &folio->page; |
2c653d0e AA |
2030 | |
2031 | chain_append: | |
2032 | /* stable_node_dup could be null if it reached the limit */ | |
2033 | if (!stable_node_dup) | |
2034 | stable_node_dup = stable_node_any; | |
b4fecc67 AA |
2035 | /* |
2036 | * If stable_node was a chain and chain_prune collapsed it, | |
0ba1d0f7 AA |
2037 | * stable_node has been updated to be the new regular |
2038 | * stable_node. A collapse of the chain is indistinguishable | |
2039 | * from the case there was no chain in the stable | |
2040 | * rbtree. Otherwise stable_node is the chain and | |
2041 | * stable_node_dup is the dup to replace. | |
b4fecc67 | 2042 | */ |
0ba1d0f7 | 2043 | if (stable_node_dup == stable_node) { |
b4fecc67 | 2044 | VM_BUG_ON(is_stable_node_dup(stable_node_dup)); |
2c653d0e AA |
2045 | /* chain is missing so create it */ |
2046 | stable_node = alloc_stable_node_chain(stable_node_dup, | |
2047 | root); | |
2048 | if (!stable_node) | |
2049 | return NULL; | |
2050 | } | |
2051 | /* | |
2052 | * Add this stable_node dup that was | |
2053 | * migrated to the stable_node chain | |
2054 | * of the current nid for this page | |
2055 | * content. | |
2056 | */ | |
b4fecc67 | 2057 | VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); |
2c653d0e AA |
2058 | VM_BUG_ON(page_node->head != &migrate_nodes); |
2059 | list_del(&page_node->list); | |
2060 | DO_NUMA(page_node->nid = nid); | |
2061 | stable_node_chain_add_dup(page_node, stable_node); | |
2062 | goto out; | |
31dbd01f IE |
2063 | } |
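
The contract of stable_tree_search(), gathered from the paths above: a match returns the kpage with a folio reference held; ERR_PTR(-EBUSY) means the matching stable folio could not be trylocked right now; NULL means no usable match (none found, every dup of a matching chain already at the sharing limit, or the match no longer qualifying for sharing). A scanned page that is itself a migrated KSM page may additionally have its stable node re-linked into this NUMA node's tree (the replace and chain_append paths) rather than being merged.
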
2064 | ||
2065 | /* | |
e850dcf5 | 2066 | * stable_tree_insert - insert stable tree node pointing to new ksm page |
31dbd01f IE |
2067 | * into the stable tree. |
2068 | * | |
7b6ba2c7 HD |
2069 | * This function returns the stable tree node just allocated on success, |
2070 | * NULL otherwise. | |
31dbd01f | 2071 | */ |
79899cce | 2072 | static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) |
31dbd01f | 2073 | { |
90bd6fd3 PH |
2074 | int nid; |
2075 | unsigned long kpfn; | |
ef53d16c | 2076 | struct rb_root *root; |
90bd6fd3 | 2077 | struct rb_node **new; |
f2e5ff85 | 2078 | struct rb_node *parent; |
21fbd591 | 2079 | struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; |
2c653d0e | 2080 | bool need_chain = false; |
31dbd01f | 2081 | |
79899cce | 2082 | kpfn = folio_pfn(kfolio); |
90bd6fd3 | 2083 | nid = get_kpfn_nid(kpfn); |
ef53d16c | 2084 | root = root_stable_tree + nid; |
f2e5ff85 AA |
2085 | again: |
2086 | parent = NULL; | |
ef53d16c | 2087 | new = &root->rb_node; |
90bd6fd3 | 2088 | |
31dbd01f | 2089 | while (*new) { |
79899cce | 2090 | struct folio *tree_folio; |
31dbd01f IE |
2091 | int ret; |
2092 | ||
08beca44 | 2093 | cond_resched(); |
21fbd591 | 2094 | stable_node = rb_entry(*new, struct ksm_stable_node, node); |
2c653d0e | 2095 | stable_node_any = NULL; |
79899cce | 2096 | tree_folio = chain(&stable_node_dup, stable_node, root); |
2c653d0e AA |
2097 | if (!stable_node_dup) { |
2098 | /* | |
2099 | * Either all stable_node dups were full in | |
2100 | * this stable_node chain, or this chain was | |
2101 | * empty and should be rb_erased. | |
2102 | */ | |
2103 | stable_node_any = stable_node_dup_any(stable_node, | |
2104 | root); | |
2105 | if (!stable_node_any) { | |
2106 | /* rb_erase just run */ | |
2107 | goto again; | |
2108 | } | |
2109 | /* | |
2110 | * Take any of the stable_node dups page of | |
2111 | * this stable_node chain to let the tree walk | |
2112 | * continue. All KSM pages belonging to the | |
2113 | * stable_node dups in a stable_node chain | |
2114 | * have the same content and they're | |
457aef94 | 2115 | * write protected at all times. Any will work |
2c653d0e AA |
2116 | * fine to continue the walk. |
2117 | */ | |
79899cce | 2118 | tree_folio = ksm_get_folio(stable_node_any, |
85b67b01 | 2119 | KSM_GET_FOLIO_NOLOCK); |
2c653d0e AA |
2120 | } |
2121 | VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); | |
79899cce | 2122 | if (!tree_folio) { |
f2e5ff85 AA |
2123 | /* |
2124 | * If we walked over a stale stable_node, | |
79899cce | 2125 | * ksm_get_folio() will call rb_erase() and it |
f2e5ff85 AA |
2126 | * may rebalance the tree from under us. So |
2127 | * restart the search from scratch. Returning | |
2128 | * NULL would be safe too, but we'd generate | |
2129 | * false negative insertions just because some | |
2130 | * stable_node was stale. | |
2131 | */ | |
2132 | goto again; | |
2133 | } | |
31dbd01f | 2134 | |
79899cce AS |
2135 | ret = memcmp_pages(&kfolio->page, &tree_folio->page); |
2136 | folio_put(tree_folio); | |
31dbd01f IE |
2137 | |
2138 | parent = *new; | |
2139 | if (ret < 0) | |
2140 | new = &parent->rb_left; | |
2141 | else if (ret > 0) | |
2142 | new = &parent->rb_right; | |
2143 | else { | |
2c653d0e AA |
2144 | need_chain = true; |
2145 | break; | |
31dbd01f IE |
2146 | } |
2147 | } | |
2148 | ||
2c653d0e AA |
2149 | stable_node_dup = alloc_stable_node(); |
2150 | if (!stable_node_dup) | |
7b6ba2c7 | 2151 | return NULL; |
31dbd01f | 2152 | |
2c653d0e AA |
2153 | INIT_HLIST_HEAD(&stable_node_dup->hlist); |
2154 | stable_node_dup->kpfn = kpfn; | |
2c653d0e AA |
2155 | stable_node_dup->rmap_hlist_len = 0; |
2156 | DO_NUMA(stable_node_dup->nid = nid); | |
2157 | if (!need_chain) { | |
2158 | rb_link_node(&stable_node_dup->node, parent, new); | |
2159 | rb_insert_color(&stable_node_dup->node, root); | |
2160 | } else { | |
2161 | if (!is_stable_node_chain(stable_node)) { | |
21fbd591 | 2162 | struct ksm_stable_node *orig = stable_node; |
2c653d0e AA |
2163 | /* chain is missing so create it */ |
2164 | stable_node = alloc_stable_node_chain(orig, root); | |
2165 | if (!stable_node) { | |
2166 | free_stable_node(stable_node_dup); | |
2167 | return NULL; | |
2168 | } | |
2169 | } | |
2170 | stable_node_chain_add_dup(stable_node_dup, stable_node); | |
2171 | } | |
08beca44 | 2172 | |
90e82349 CZ |
2173 | folio_set_stable_node(kfolio, stable_node_dup); |
2174 | ||
2c653d0e | 2175 | return stable_node_dup; |
31dbd01f IE |
2176 | } |
2177 | ||
2178 | /* | |
8dd3557a HD |
2179 | * unstable_tree_search_insert - search for identical page, |
2180 | * else insert rmap_item into the unstable tree. | |
31dbd01f IE |
2181 | * |
2182 | * This function searches for a page in the unstable tree identical to the | |
2183 | * page currently being scanned; and if no identical page is found in the | |
2184 | * tree, we insert rmap_item as a new object into the unstable tree. | |
2185 | * | |
2186 | * This function returns pointer to rmap_item found to be identical | |
2187 | * to the currently scanned page, NULL otherwise. | |
2188 | * | |
2189 | * This function does both searching and inserting, because they share | |
2190 | * the same walking algorithm in an rbtree. | |
2191 | */ | |
8dd3557a | 2192 | static |
21fbd591 | 2193 | struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, |
8dd3557a HD |
2194 | struct page *page, |
2195 | struct page **tree_pagep) | |
31dbd01f | 2196 | { |
90bd6fd3 PH |
2197 | struct rb_node **new; |
2198 | struct rb_root *root; | |
31dbd01f | 2199 | struct rb_node *parent = NULL; |
90bd6fd3 PH |
2200 | int nid; |
2201 | ||
2202 | nid = get_kpfn_nid(page_to_pfn(page)); | |
ef53d16c | 2203 | root = root_unstable_tree + nid; |
90bd6fd3 | 2204 | new = &root->rb_node; |
31dbd01f IE |
2205 | |
2206 | while (*new) { | |
21fbd591 | 2207 | struct ksm_rmap_item *tree_rmap_item; |
8dd3557a | 2208 | struct page *tree_page; |
31dbd01f IE |
2209 | int ret; |
2210 | ||
d178f27f | 2211 | cond_resched(); |
21fbd591 | 2212 | tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); |
8dd3557a | 2213 | tree_page = get_mergeable_page(tree_rmap_item); |
c8f95ed1 | 2214 | if (!tree_page) |
31dbd01f IE |
2215 | return NULL; |
2216 | ||
2217 | /* | |
8dd3557a | 2218 | * Don't substitute a ksm page for a forked page. |
31dbd01f | 2219 | */ |
8dd3557a HD |
2220 | if (page == tree_page) { |
2221 | put_page(tree_page); | |
31dbd01f IE |
2222 | return NULL; |
2223 | } | |
2224 | ||
8dd3557a | 2225 | ret = memcmp_pages(page, tree_page); |
31dbd01f IE |
2226 | |
2227 | parent = *new; | |
2228 | if (ret < 0) { | |
8dd3557a | 2229 | put_page(tree_page); |
31dbd01f IE |
2230 | new = &parent->rb_left; |
2231 | } else if (ret > 0) { | |
8dd3557a | 2232 | put_page(tree_page); |
31dbd01f | 2233 | new = &parent->rb_right; |
b599cbdf HD |
2234 | } else if (!ksm_merge_across_nodes && |
2235 | page_to_nid(tree_page) != nid) { | |
2236 | /* | |
2237 | * If tree_page has been migrated to another NUMA node, | |
2238 | * it will be flushed out and put in the right unstable | |
2239 | * tree next time: only merge with it when across_nodes. | |
2240 | */ | |
2241 | put_page(tree_page); | |
2242 | return NULL; | |
31dbd01f | 2243 | } else { |
8dd3557a | 2244 | *tree_pagep = tree_page; |
31dbd01f IE |
2245 | return tree_rmap_item; |
2246 | } | |
2247 | } | |
2248 | ||
7b6ba2c7 | 2249 | rmap_item->address |= UNSTABLE_FLAG; |
31dbd01f | 2250 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
e850dcf5 | 2251 | DO_NUMA(rmap_item->nid = nid); |
31dbd01f | 2252 | rb_link_node(&rmap_item->node, parent, new); |
90bd6fd3 | 2253 | rb_insert_color(&rmap_item->node, root); |
31dbd01f | 2254 | |
473b0ce4 | 2255 | ksm_pages_unshared++; |
31dbd01f IE |
2256 | return NULL; |
2257 | } | |
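
Because pages in the unstable tree are not write-protected, their contents can change after insertion and leave nodes mis-ordered; KSM tolerates that by rebuilding this tree from scratch on every full scan (see the root_unstable_tree reset in scan_get_next_rmap_item() below). A self-contained user-space sketch of the search-or-insert walk, with a plain binary search tree and libc calls standing in for the kernel's rbtree:

	#include <stdlib.h>
	#include <string.h>

	#define SKETCH_PAGE_SIZE 4096

	struct sketch_node {
		struct sketch_node *left, *right;
		unsigned char content[SKETCH_PAGE_SIZE];
	};

	/* Return the node with identical content, or insert the page and return NULL. */
	static struct sketch_node *
	sketch_search_insert(struct sketch_node **root, const unsigned char *page)
	{
		struct sketch_node **new = root, *node;

		while (*new) {
			int ret = memcmp(page, (*new)->content, SKETCH_PAGE_SIZE);

			if (ret < 0)
				new = &(*new)->left;	/* smaller content: descend left */
			else if (ret > 0)
				new = &(*new)->right;	/* larger content: descend right */
			else
				return *new;		/* identical content found */
		}
		node = calloc(1, sizeof(*node));
		if (node) {
			memcpy(node->content, page, SKETCH_PAGE_SIZE);
			*new = node;
		}
		return NULL;
	}

Unlike this sketch, the kernel stores rmap_items that point at live pages rather than copies of their content, which is exactly why the real tree can go stale.
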
2258 | ||
2259 | /* | |
2260 | * stable_tree_append - add another rmap_item to the linked list of | |
2261 | * rmap_items hanging off a given node of the stable tree, all sharing | |
2262 | * the same ksm page. | |
2263 | */ | |
21fbd591 QZ |
2264 | static void stable_tree_append(struct ksm_rmap_item *rmap_item, |
2265 | struct ksm_stable_node *stable_node, | |
2c653d0e | 2266 | bool max_page_sharing_bypass) |
31dbd01f | 2267 | { |
2c653d0e AA |
2268 | /* |
2269 | * rmap won't find this mapping if we don't insert the | |
2270 | * rmap_item in the right stable_node | |
2271 | * duplicate. page_migration could break later if rmap breaks, | |
2272 | * so we might as well crash here. We really need to check for |
2273 | * rmap_hlist_len == STABLE_NODE_CHAIN, but we may as well check |
457aef94 | 2274 | * for other negative values, as an underflow detected here |
2c653d0e AA |
2275 | * for the first time (and not when decreasing rmap_hlist_len) |
2276 | * would be a sign of memory corruption in the stable_node. |
2277 | */ | |
2278 | BUG_ON(stable_node->rmap_hlist_len < 0); | |
2279 | ||
2280 | stable_node->rmap_hlist_len++; | |
2281 | if (!max_page_sharing_bypass) | |
2282 | /* possibly non fatal but unexpected overflow, only warn */ | |
2283 | WARN_ON_ONCE(stable_node->rmap_hlist_len > | |
2284 | ksm_max_page_sharing); | |
2285 | ||
7b6ba2c7 | 2286 | rmap_item->head = stable_node; |
31dbd01f | 2287 | rmap_item->address |= STABLE_FLAG; |
7b6ba2c7 | 2288 | hlist_add_head(&rmap_item->hlist, &stable_node->hlist); |
e178dfde | 2289 | |
7b6ba2c7 HD |
2290 | if (rmap_item->hlist.next) |
2291 | ksm_pages_sharing++; | |
2292 | else | |
2293 | ksm_pages_shared++; | |
76093853 | 2294 | |
2295 | rmap_item->mm->ksm_merging_pages++; | |
31dbd01f IE |
2296 | } |
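
Accounting example: a stable node with three rmap_items hanging off its hlist contributes 1 to ksm_pages_shared (the item that was added while the list was empty) and 2 to ksm_pages_sharing (the later sharers), so the sysfs ratio pages_sharing / pages_shared reflects how many duplicate mappings each KSM page eliminates.
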
2297 | ||
2298 | /* | |
81464e30 HD |
2299 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
2300 | * if not, compare checksum to previous and if it's the same, see if page can | |
2301 | * be inserted into the unstable tree, or merged with a page already there and | |
2302 | * both transferred to the stable tree. | |
31dbd01f IE |
2303 | * |
2304 | * @page: the page for which we are searching an identical page. |
2305 | * @rmap_item: the reverse mapping into the virtual address of this page | |
2306 | */ | |
21fbd591 | 2307 | static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) |
31dbd01f | 2308 | { |
4b22927f | 2309 | struct mm_struct *mm = rmap_item->mm; |
21fbd591 | 2310 | struct ksm_rmap_item *tree_rmap_item; |
8dd3557a | 2311 | struct page *tree_page = NULL; |
21fbd591 | 2312 | struct ksm_stable_node *stable_node; |
8dd3557a | 2313 | struct page *kpage; |
31dbd01f IE |
2314 | unsigned int checksum; |
2315 | int err; | |
2c653d0e | 2316 | bool max_page_sharing_bypass = false; |
31dbd01f | 2317 | |
4146d2d6 HD |
2318 | stable_node = page_stable_node(page); |
2319 | if (stable_node) { | |
2320 | if (stable_node->head != &migrate_nodes && | |
2c653d0e AA |
2321 | get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != |
2322 | NUMA(stable_node->nid)) { | |
2323 | stable_node_dup_del(stable_node); | |
4146d2d6 HD |
2324 | stable_node->head = &migrate_nodes; |
2325 | list_add(&stable_node->list, stable_node->head); | |
2326 | } | |
2327 | if (stable_node->head != &migrate_nodes && | |
2328 | rmap_item->head == stable_node) | |
2329 | return; | |
2c653d0e AA |
2330 | /* |
2331 | * If it's a KSM fork, allow it to go over the sharing limit | |
2332 | * without warnings. | |
2333 | */ | |
2334 | if (!is_page_sharing_candidate(stable_node)) | |
2335 | max_page_sharing_bypass = true; | |
4146d2d6 | 2336 | } |
31dbd01f IE |
2337 | |
2338 | /* We first start with searching the page inside the stable tree */ | |
62b61f61 | 2339 | kpage = stable_tree_search(page); |
4146d2d6 HD |
2340 | if (kpage == page && rmap_item->head == stable_node) { |
2341 | put_page(kpage); | |
2342 | return; | |
2343 | } | |
2344 | ||
2345 | remove_rmap_item_from_tree(rmap_item); | |
2346 | ||
62b61f61 | 2347 | if (kpage) { |
2cee57d1 YS |
2348 | if (PTR_ERR(kpage) == -EBUSY) |
2349 | return; | |
2350 | ||
08beca44 | 2351 | err = try_to_merge_with_ksm_page(rmap_item, page, kpage); |
31dbd01f IE |
2352 | if (!err) { |
2353 | /* | |
2354 | * The page was successfully merged: | |
2355 | * add its rmap_item to the stable tree. | |
2356 | */ | |
5ad64688 | 2357 | lock_page(kpage); |
2c653d0e AA |
2358 | stable_tree_append(rmap_item, page_stable_node(kpage), |
2359 | max_page_sharing_bypass); | |
5ad64688 | 2360 | unlock_page(kpage); |
31dbd01f | 2361 | } |
8dd3557a | 2362 | put_page(kpage); |
31dbd01f IE |
2363 | return; |
2364 | } | |
2365 | ||
2366 | /* | |
4035c07a HD |
2367 | * If the hash value of the page has changed from the last time |
2368 | * we calculated it, this page is changing frequently: therefore we | |
2369 | * don't want to insert it in the unstable tree, and we don't want | |
2370 | * to waste our time searching for something identical to it there. | |
31dbd01f IE |
2371 | */ |
2372 | checksum = calc_checksum(page); | |
2373 | if (rmap_item->oldchecksum != checksum) { | |
2374 | rmap_item->oldchecksum = checksum; | |
2375 | return; | |
2376 | } | |
2377 | ||
e86c59b1 CI |
2378 | /* |
2379 | * Same checksum as an empty page. We attempt to merge it with the | |
2380 | * appropriate zero page if the user enabled this via sysfs. | |
2381 | */ | |
2382 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { | |
2383 | struct vm_area_struct *vma; | |
2384 | ||
d8ed45c5 | 2385 | mmap_read_lock(mm); |
4b22927f | 2386 | vma = find_mergeable_vma(mm, rmap_item->address); |
56df70a6 MS |
2387 | if (vma) { |
2388 | err = try_to_merge_one_page(vma, page, | |
2389 | ZERO_PAGE(rmap_item->address)); | |
739100c8 SR |
2390 | trace_ksm_merge_one_page( |
2391 | page_to_pfn(ZERO_PAGE(rmap_item->address)), | |
2392 | rmap_item, mm, err); | |
56df70a6 MS |
2393 | } else { |
2394 | /* | |
2395 | * If the vma is out of date, we do not need to | |
2396 | * continue. | |
2397 | */ | |
2398 | err = 0; | |
2399 | } | |
d8ed45c5 | 2400 | mmap_read_unlock(mm); |
e86c59b1 CI |
2401 | /* |
2402 | * In case of failure, the page was not really empty, so we | |
2403 | * need to continue. Otherwise we're done. | |
2404 | */ | |
2405 | if (!err) | |
2406 | return; | |
2407 | } | |
8dd3557a HD |
2408 | tree_rmap_item = |
2409 | unstable_tree_search_insert(rmap_item, page, &tree_page); | |
31dbd01f | 2410 | if (tree_rmap_item) { |
77da2ba0 CI |
2411 | bool split; |
2412 | ||
8dd3557a HD |
2413 | kpage = try_to_merge_two_pages(rmap_item, page, |
2414 | tree_rmap_item, tree_page); | |
77da2ba0 CI |
2415 | /* |
2416 | * If both pages we tried to merge belong to the same compound | |
2417 | * page, then we actually ended up increasing the reference | |
2418 | * count of the same compound page twice, and split_huge_page | |
2419 | * failed. | |
2420 | * Here we set a flag if that happened, and we use it later to | |
2421 | * try split_huge_page again. Since we call put_page right | |
2422 | * afterwards, the reference count will be correct and | |
2423 | * split_huge_page should succeed. | |
2424 | */ | |
2425 | split = PageTransCompound(page) | |
2426 | && compound_head(page) == compound_head(tree_page); | |
8dd3557a | 2427 | put_page(tree_page); |
8dd3557a | 2428 | if (kpage) { |
bc56620b HD |
2429 | /* |
2430 | * The pages were successfully merged: insert new | |
2431 | * node in the stable tree and add both rmap_items. | |
2432 | */ | |
5ad64688 | 2433 | lock_page(kpage); |
79899cce | 2434 | stable_node = stable_tree_insert(page_folio(kpage)); |
7b6ba2c7 | 2435 | if (stable_node) { |
2c653d0e AA |
2436 | stable_tree_append(tree_rmap_item, stable_node, |
2437 | false); | |
2438 | stable_tree_append(rmap_item, stable_node, | |
2439 | false); | |
7b6ba2c7 | 2440 | } |
5ad64688 | 2441 | unlock_page(kpage); |
7b6ba2c7 | 2442 | |
31dbd01f IE |
2443 | /* |
2444 | * If we fail to insert the page into the stable tree, | |
2445 | * we will have 2 virtual addresses that are pointing | |
2446 | * to a ksm page left outside the stable tree, | |
2447 | * in which case we need to break_cow on both. | |
2448 | */ | |
7b6ba2c7 | 2449 | if (!stable_node) { |
8dd3557a HD |
2450 | break_cow(tree_rmap_item); |
2451 | break_cow(rmap_item); | |
31dbd01f | 2452 | } |
77da2ba0 CI |
2453 | } else if (split) { |
2454 | /* | |
2455 | * We are here if we tried to merge two pages and | |
2456 | * failed because they both belonged to the same | |
2457 | * compound page. We will split the page now, but no | |
2458 | * merging will take place. | |
2459 | * We do not want to add the cost of a full lock; if | |
2460 | * the page is locked, it is better to skip it and | |
2461 | * perhaps try again later. | |
2462 | */ | |
2463 | if (!trylock_page(page)) | |
2464 | return; | |
2465 | split_huge_page(page); | |
2466 | unlock_page(page); | |
31dbd01f | 2467 | } |
31dbd01f IE |
2468 | } |
2469 | } | |
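
In outline, the function above works through four ordered decisions; a compressed restatement, with the error handling, forked-page and THP-split details elided:

	/*
	 * cmp_and_merge_page(), abridged (illustrative summary only):
	 *
	 *	kpage = stable_tree_search(page);
	 *	if (kpage)			merge into kpage; stable_tree_append()
	 *	else if (checksum changed)	page is volatile; retry next scan
	 *	else if (checksum == zero page)	optionally merge with ZERO_PAGE()
	 *	else unstable_tree_search_insert():
	 *		match	-> merge both pages; stable_tree_insert() the new kpage
	 *		no match -> rmap_item parked in the unstable tree
	 */
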
2470 | ||
21fbd591 QZ |
2471 | static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, |
2472 | struct ksm_rmap_item **rmap_list, | |
31dbd01f IE |
2473 | unsigned long addr) |
2474 | { | |
21fbd591 | 2475 | struct ksm_rmap_item *rmap_item; |
31dbd01f | 2476 | |
6514d511 HD |
2477 | while (*rmap_list) { |
2478 | rmap_item = *rmap_list; | |
93d17715 | 2479 | if ((rmap_item->address & PAGE_MASK) == addr) |
31dbd01f | 2480 | return rmap_item; |
31dbd01f IE |
2481 | if (rmap_item->address > addr) |
2482 | break; | |
6514d511 | 2483 | *rmap_list = rmap_item->rmap_list; |
31dbd01f | 2484 | remove_rmap_item_from_tree(rmap_item); |
31dbd01f IE |
2485 | free_rmap_item(rmap_item); |
2486 | } | |
2487 | ||
2488 | rmap_item = alloc_rmap_item(); | |
2489 | if (rmap_item) { | |
2490 | /* It has already been zeroed */ | |
58730ab6 | 2491 | rmap_item->mm = mm_slot->slot.mm; |
cb4df4ca | 2492 | rmap_item->mm->ksm_rmap_items++; |
31dbd01f | 2493 | rmap_item->address = addr; |
6514d511 HD |
2494 | rmap_item->rmap_list = *rmap_list; |
2495 | *rmap_list = rmap_item; | |
31dbd01f IE |
2496 | } |
2497 | return rmap_item; | |
2498 | } | |
2499 | ||
5e924ff5 SR |
2500 | /* |
2501 | * Calculate the number of scans to skip for a given rmap_item age. |
2502 | * The age reflects how often de-duplication of this page has already |
2503 | * been tried unsuccessfully; the smaller the age, the fewer scans are skipped. |
2504 | * | |
2505 | * @age: rmap_item age of page | |
2506 | */ | |
2507 | static unsigned int skip_age(rmap_age_t age) | |
2508 | { | |
2509 | if (age <= 3) | |
2510 | return 1; | |
2511 | if (age <= 5) | |
2512 | return 2; | |
2513 | if (age <= 8) | |
2514 | return 4; | |
2515 | ||
2516 | return 8; | |
2517 | } | |
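
The schedule these thresholds imply: rmap_items younger than age 3 are never skipped by the caller below; from then on an unmergeable page is skipped for skip_age(age) consecutive scans between attempts, i.e. 1 scan at age 3, 2 at ages 4-5, 4 at ages 6-8, and 8 once the age exceeds 8. Scan effort on long-lived unmergeable pages thus decays geometrically, with ksm_pages_skipped counting the work avoided.
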
2518 | ||
2519 | /* | |
2520 | * Determines if a page should be skipped for the current scan. | |
2521 | * | |
2522 | * @page: page to check | |
2523 | * @rmap_item: associated rmap_item of page | |
2524 | */ | |
2525 | static bool should_skip_rmap_item(struct page *page, | |
2526 | struct ksm_rmap_item *rmap_item) | |
2527 | { | |
2528 | rmap_age_t age; | |
2529 | ||
2530 | if (!ksm_smart_scan) | |
2531 | return false; | |
2532 | ||
2533 | /* | |
2534 | * Never skip pages that are already KSM; cmp_and_merge_page() |
2535 | * will essentially ignore them, but we still have to process them | |
2536 | * properly. | |
2537 | */ | |
2538 | if (PageKsm(page)) | |
2539 | return false; | |
2540 | ||
2541 | age = rmap_item->age; | |
2542 | if (age != U8_MAX) | |
2543 | rmap_item->age++; | |
2544 | ||
2545 | /* | |
2546 | * Pages with smaller ages are not skipped; they need a chance to |
2547 | * go through the different phases of KSM merging. |
2548 | */ | |
2549 | if (age < 3) | |
2550 | return false; | |
2551 | ||
2552 | /* | |
2553 | * Are we still allowed to skip? If not, then don't skip it | |
2554 | * and determine how much more often we are allowed to skip next. | |
2555 | */ | |
2556 | if (!rmap_item->remaining_skips) { | |
2557 | rmap_item->remaining_skips = skip_age(age); | |
2558 | return false; | |
2559 | } | |
2560 | ||
2561 | /* Skip this page */ | |
e5a68991 | 2562 | ksm_pages_skipped++; |
5e924ff5 SR |
2563 | rmap_item->remaining_skips--; |
2564 | remove_rmap_item_from_tree(rmap_item); | |
2565 | return true; | |
2566 | } | |
2567 | ||
21fbd591 | 2568 | static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) |
31dbd01f IE |
2569 | { |
2570 | struct mm_struct *mm; | |
58730ab6 QZ |
2571 | struct ksm_mm_slot *mm_slot; |
2572 | struct mm_slot *slot; | |
31dbd01f | 2573 | struct vm_area_struct *vma; |
21fbd591 | 2574 | struct ksm_rmap_item *rmap_item; |
a5f18ba0 | 2575 | struct vma_iterator vmi; |
90bd6fd3 | 2576 | int nid; |
31dbd01f | 2577 | |
58730ab6 | 2578 | if (list_empty(&ksm_mm_head.slot.mm_node)) |
31dbd01f IE |
2579 | return NULL; |
2580 | ||
58730ab6 QZ |
2581 | mm_slot = ksm_scan.mm_slot; |
2582 | if (mm_slot == &ksm_mm_head) { | |
4e5fa4f5 | 2583 | advisor_start_scan(); |
739100c8 SR |
2584 | trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); |
2585 | ||
2919bfd0 | 2586 | /* |
1fec6890 MWO |
2587 | * A number of pages can hang around indefinitely in the per-cpu |
2588 | * LRU caches, their raised page count preventing write_protect_page |
2919bfd0 HD |
2589 | * from merging them. Though it doesn't really matter much, |
2590 | * it is puzzling to see some stuck in pages_volatile until | |
2591 | * other activity jostles them out, and they also prevented | |
2592 | * LTP's KSM test from succeeding deterministically; so drain | |
2593 | * them here (here rather than on entry to ksm_do_scan(), | |
2594 | * so we don't IPI too often when pages_to_scan is set low). | |
2595 | */ | |
2596 | lru_add_drain_all(); | |
2597 | ||
4146d2d6 HD |
2598 | /* |
2599 | * Whereas stale stable_nodes on the stable_tree itself | |
2600 | * get pruned in the regular course of stable_tree_search(), | |
2601 | * those moved out to the migrate_nodes list can accumulate: | |
2602 | * so prune them once before each full scan. | |
2603 | */ | |
2604 | if (!ksm_merge_across_nodes) { | |
21fbd591 | 2605 | struct ksm_stable_node *stable_node, *next; |
72556a4c | 2606 | struct folio *folio; |
4146d2d6 | 2607 | |
03640418 GT |
2608 | list_for_each_entry_safe(stable_node, next, |
2609 | &migrate_nodes, list) { | |
72556a4c | 2610 | folio = ksm_get_folio(stable_node, |
85b67b01 | 2611 | KSM_GET_FOLIO_NOLOCK); |
72556a4c AS |
2612 | if (folio) |
2613 | folio_put(folio); | |
4146d2d6 HD |
2614 | cond_resched(); |
2615 | } | |
2616 | } | |
2617 | ||
ef53d16c | 2618 | for (nid = 0; nid < ksm_nr_node_ids; nid++) |
90bd6fd3 | 2619 | root_unstable_tree[nid] = RB_ROOT; |
31dbd01f IE |
2620 | |
2621 | spin_lock(&ksm_mmlist_lock); | |
58730ab6 QZ |
2622 | slot = list_entry(mm_slot->slot.mm_node.next, |
2623 | struct mm_slot, mm_node); | |
2624 | mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); | |
2625 | ksm_scan.mm_slot = mm_slot; | |
31dbd01f | 2626 | spin_unlock(&ksm_mmlist_lock); |
2b472611 HD |
2627 | /* |
2628 | * Although we tested list_empty() above, a racing __ksm_exit | |
2629 | * of the last mm on the list may have removed it since then. | |
2630 | */ | |
58730ab6 | 2631 | if (mm_slot == &ksm_mm_head) |
2b472611 | 2632 | return NULL; |
31dbd01f IE |
2633 | next_mm: |
2634 | ksm_scan.address = 0; | |
58730ab6 | 2635 | ksm_scan.rmap_list = &mm_slot->rmap_list; |
31dbd01f IE |
2636 | } |
2637 | ||
58730ab6 | 2638 | slot = &mm_slot->slot; |
31dbd01f | 2639 | mm = slot->mm; |
a5f18ba0 MWO |
2640 | vma_iter_init(&vmi, mm, ksm_scan.address); |
2641 | ||
d8ed45c5 | 2642 | mmap_read_lock(mm); |
9ba69294 | 2643 | if (ksm_test_exit(mm)) |
a5f18ba0 | 2644 | goto no_vmas; |
9ba69294 | 2645 | |
a5f18ba0 | 2646 | for_each_vma(vmi, vma) { |
31dbd01f IE |
2647 | if (!(vma->vm_flags & VM_MERGEABLE)) |
2648 | continue; | |
2649 | if (ksm_scan.address < vma->vm_start) | |
2650 | ksm_scan.address = vma->vm_start; | |
2651 | if (!vma->anon_vma) | |
2652 | ksm_scan.address = vma->vm_end; | |
2653 | ||
2654 | while (ksm_scan.address < vma->vm_end) { | |
9ba69294 HD |
2655 | if (ksm_test_exit(mm)) |
2656 | break; | |
31dbd01f | 2657 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
f7091ed6 | 2658 | if (IS_ERR_OR_NULL(*page)) { |
21ae5b01 AA |
2659 | ksm_scan.address += PAGE_SIZE; |
2660 | cond_resched(); | |
2661 | continue; | |
2662 | } | |
f7091ed6 HW |
2663 | if (is_zone_device_page(*page)) |
2664 | goto next_page; | |
f765f540 | 2665 | if (PageAnon(*page)) { |
31dbd01f IE |
2666 | flush_anon_page(vma, *page, ksm_scan.address); |
2667 | flush_dcache_page(*page); | |
58730ab6 | 2668 | rmap_item = get_next_rmap_item(mm_slot, |
6514d511 | 2669 | ksm_scan.rmap_list, ksm_scan.address); |
31dbd01f | 2670 | if (rmap_item) { |
6514d511 HD |
2671 | ksm_scan.rmap_list = |
2672 | &rmap_item->rmap_list; | |
5e924ff5 SR |
2673 | |
2674 | if (should_skip_rmap_item(*page, rmap_item)) | |
2675 | goto next_page; | |
2676 | ||
31dbd01f IE |
2677 | ksm_scan.address += PAGE_SIZE; |
2678 | } else | |
2679 | put_page(*page); | |
d8ed45c5 | 2680 | mmap_read_unlock(mm); |
31dbd01f IE |
2681 | return rmap_item; |
2682 | } | |
f7091ed6 | 2683 | next_page: |
21ae5b01 | 2684 | put_page(*page); |
31dbd01f IE |
2685 | ksm_scan.address += PAGE_SIZE; |
2686 | cond_resched(); | |
2687 | } | |
2688 | } | |
2689 | ||
9ba69294 | 2690 | if (ksm_test_exit(mm)) { |
a5f18ba0 | 2691 | no_vmas: |
9ba69294 | 2692 | ksm_scan.address = 0; |
58730ab6 | 2693 | ksm_scan.rmap_list = &mm_slot->rmap_list; |
9ba69294 | 2694 | } |
31dbd01f IE |
2695 | /* |
2696 | * Nuke all the rmap_items that are beyond this current rmap, |
2697 | * because there were no VM_MERGEABLE VMAs with such addresses. |
2698 | */ | |
420be4ed | 2699 | remove_trailing_rmap_items(ksm_scan.rmap_list); |
31dbd01f IE |
2700 | |
2701 | spin_lock(&ksm_mmlist_lock); | |
58730ab6 QZ |
2702 | slot = list_entry(mm_slot->slot.mm_node.next, |
2703 | struct mm_slot, mm_node); | |
2704 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); | |
cd551f97 HD |
2705 | if (ksm_scan.address == 0) { |
2706 | /* | |
c1e8d7c6 | 2707 | * We've completed a full scan of all vmas, holding mmap_lock |
cd551f97 HD |
2708 | * throughout, and found no VM_MERGEABLE: so do the same as |
2709 | * __ksm_exit does to remove this mm from all our lists now. | |
9ba69294 HD |
2710 | * This applies either when cleaning up after __ksm_exit |
2711 | * (but beware: we can reach here even before __ksm_exit), | |
2712 | * or when all VM_MERGEABLE areas have been unmapped (and | |
c1e8d7c6 | 2713 | * mmap_lock then protects against race with MADV_MERGEABLE). |
cd551f97 | 2714 | */ |
58730ab6 QZ |
2715 | hash_del(&mm_slot->slot.hash); |
2716 | list_del(&mm_slot->slot.mm_node); | |
9ba69294 HD |
2717 | spin_unlock(&ksm_mmlist_lock); |
2718 | ||
58730ab6 | 2719 | mm_slot_free(mm_slot_cache, mm_slot); |
cd551f97 | 2720 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
d7597f59 | 2721 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
d8ed45c5 | 2722 | mmap_read_unlock(mm); |
9ba69294 HD |
2723 | mmdrop(mm); |
2724 | } else { | |
d8ed45c5 | 2725 | mmap_read_unlock(mm); |
7496fea9 | 2726 | /* |
3e4e28c5 | 2727 | * mmap_read_unlock(mm) first, because once |
7496fea9 ZC |
2728 | * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may |
2729 | * already have been freed under us by __ksm_exit() | |
2730 | * because the "mm_slot" is still hashed and | |
2731 | * ksm_scan.mm_slot doesn't point to it anymore. | |
2732 | */ | |
2733 | spin_unlock(&ksm_mmlist_lock); | |
cd551f97 | 2734 | } |
31dbd01f IE |
2735 | |
2736 | /* Repeat until we've completed scanning the whole list */ | |
58730ab6 QZ |
2737 | mm_slot = ksm_scan.mm_slot; |
2738 | if (mm_slot != &ksm_mm_head) | |
31dbd01f IE |
2739 | goto next_mm; |
2740 | ||
4e5fa4f5 SR |
2741 | advisor_stop_scan(); |
2742 | ||
739100c8 | 2743 | trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); |
31dbd01f IE |
2744 | ksm_scan.seqnr++; |
2745 | return NULL; | |
2746 | } | |
2747 | ||
2748 | /** | |
2749 | * ksm_do_scan - the ksm scanner main worker function. | |
b7701a5f | 2750 | * @scan_npages: number of pages we want to scan before we return. |
31dbd01f IE |
2751 | */ |
2752 | static void ksm_do_scan(unsigned int scan_npages) | |
2753 | { | |
21fbd591 | 2754 | struct ksm_rmap_item *rmap_item; |
3f649ab7 | 2755 | struct page *page; |
31dbd01f | 2756 | |
730cdc2c | 2757 | while (scan_npages-- && likely(!freezing(current))) { |
31dbd01f IE |
2758 | cond_resched(); |
2759 | rmap_item = scan_get_next_rmap_item(&page); | |
2760 | if (!rmap_item) | |
2761 | return; | |
4146d2d6 | 2762 | cmp_and_merge_page(page, rmap_item); |
31dbd01f | 2763 | put_page(page); |
730cdc2c | 2764 | ksm_pages_scanned++; |
31dbd01f IE |
2765 | } |
2766 | } | |
2767 | ||
6e158384 HD |
2768 | static int ksmd_should_run(void) |
2769 | { | |
58730ab6 | 2770 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); |
6e158384 HD |
2771 | } |
2772 | ||
31dbd01f IE |
2773 | static int ksm_scan_thread(void *nothing) |
2774 | { | |
fcf9a0ef KT |
2775 | unsigned int sleep_ms; |
2776 | ||
878aee7d | 2777 | set_freezable(); |
339aa624 | 2778 | set_user_nice(current, 5); |
31dbd01f IE |
2779 | |
2780 | while (!kthread_should_stop()) { | |
6e158384 | 2781 | mutex_lock(&ksm_thread_mutex); |
ef4d43a8 | 2782 | wait_while_offlining(); |
6e158384 | 2783 | if (ksmd_should_run()) |
31dbd01f | 2784 | ksm_do_scan(ksm_thread_pages_to_scan); |
6e158384 HD |
2785 | mutex_unlock(&ksm_thread_mutex); |
2786 | ||
2787 | if (ksmd_should_run()) { | |
fcf9a0ef | 2788 | sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); |
f55afd95 | 2789 | wait_event_freezable_timeout(ksm_iter_wait, |
fcf9a0ef KT |
2790 | sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), |
2791 | msecs_to_jiffies(sleep_ms)); | |
31dbd01f | 2792 | } else { |
878aee7d | 2793 | wait_event_freezable(ksm_thread_wait, |
6e158384 | 2794 | ksmd_should_run() || kthread_should_stop()); |
31dbd01f IE |
2795 | } |
2796 | } | |
2797 | return 0; | |
2798 | } | |
2799 | ||
d7597f59 SR |
2800 | static void __ksm_add_vma(struct vm_area_struct *vma) |
2801 | { | |
2802 | unsigned long vm_flags = vma->vm_flags; | |
2803 | ||
2804 | if (vm_flags & VM_MERGEABLE) | |
2805 | return; | |
2806 | ||
2807 | if (vma_ksm_compatible(vma)) | |
2808 | vm_flags_set(vma, VM_MERGEABLE); | |
2809 | } | |
2810 | ||
24139c07 DH |
2811 | static int __ksm_del_vma(struct vm_area_struct *vma) |
2812 | { | |
2813 | int err; | |
2814 | ||
2815 | if (!(vma->vm_flags & VM_MERGEABLE)) | |
2816 | return 0; | |
2817 | ||
2818 | if (vma->anon_vma) { | |
49b06385 | 2819 | err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); |
24139c07 DH |
2820 | if (err) |
2821 | return err; | |
2822 | } | |
2823 | ||
2824 | vm_flags_clear(vma, VM_MERGEABLE); | |
2825 | return 0; | |
2826 | } | |
d7597f59 SR |
2827 | /** |
2828 | * ksm_add_vma - Mark vma as mergeable if compatible | |
2829 | * | |
2830 | * @vma: Pointer to vma | |
2831 | */ | |
2832 | void ksm_add_vma(struct vm_area_struct *vma) | |
2833 | { | |
2834 | struct mm_struct *mm = vma->vm_mm; | |
2835 | ||
2836 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) | |
2837 | __ksm_add_vma(vma); | |
2838 | } | |
2839 | ||
2840 | static void ksm_add_vmas(struct mm_struct *mm) | |
2841 | { | |
2842 | struct vm_area_struct *vma; | |
2843 | ||
2844 | VMA_ITERATOR(vmi, mm, 0); | |
2845 | for_each_vma(vmi, vma) | |
2846 | __ksm_add_vma(vma); | |
2847 | } | |
2848 | ||
24139c07 DH |
2849 | static int ksm_del_vmas(struct mm_struct *mm) |
2850 | { | |
2851 | struct vm_area_struct *vma; | |
2852 | int err; | |
2853 | ||
2854 | VMA_ITERATOR(vmi, mm, 0); | |
2855 | for_each_vma(vmi, vma) { | |
2856 | err = __ksm_del_vma(vma); | |
2857 | if (err) | |
2858 | return err; | |
2859 | } | |
2860 | return 0; | |
2861 | } | |
2862 | ||
d7597f59 SR |
2863 | /** |
2864 | * ksm_enable_merge_any - Add mm to the mm ksm list and enable merging on all |
2865 | * compatible VMAs |
2866 | * | |
2867 | * @mm: Pointer to mm | |
2868 | * | |
2869 | * Returns 0 on success, otherwise error code | |
2870 | */ | |
2871 | int ksm_enable_merge_any(struct mm_struct *mm) | |
2872 | { | |
2873 | int err; | |
2874 | ||
2875 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) | |
2876 | return 0; | |
2877 | ||
2878 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { | |
2879 | err = __ksm_enter(mm); | |
2880 | if (err) | |
2881 | return err; | |
2882 | } | |
2883 | ||
2884 | set_bit(MMF_VM_MERGE_ANY, &mm->flags); | |
2885 | ksm_add_vmas(mm); | |
2886 | ||
2887 | return 0; | |
2888 | } | |
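For context, ksm_enable_merge_any() is what backs the process-wide prctl(PR_SET_MEMORY_MERGE) opt-in. A minimal userspace sketch follows; the fallback #define values are assumptions taken from include/uapi/linux/prctl.h, so verify them against your headers:

```c
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* assumed value from include/uapi/linux/prctl.h */
#endif
#ifndef PR_GET_MEMORY_MERGE
#define PR_GET_MEMORY_MERGE 68	/* assumed value, see above */
#endif

int main(void)
{
	/* Opt the whole process into KSM: sets MMF_VM_MERGE_ANY and marks
	 * every compatible VMA, present and future, VM_MERGEABLE. */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("PR_SET_MEMORY_MERGE");	/* requires CAP_SYS_RESOURCE */

	printf("merge-any enabled: %d\n",
	       (int)prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));
	return 0;
}
```

Passing 0 as the second argument takes the ksm_disable_merge_any() path below, unmerging any merged pages first.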
2889 | ||
24139c07 DH |
2890 | /** |
2891 | * ksm_disable_merge_any - Disable merging on all compatible VMAs of the mm, |
2892 | * previously enabled via ksm_enable_merge_any(). |
2893 | * | |
2894 | * Disabling merging implies unmerging any merged pages, like setting | |
2895 | * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and | |
2896 | * merging on all compatible VMAs remains enabled. |
2897 | * |
2898 | * @mm: Pointer to mm | |
2899 | * | |
2900 | * Returns 0 on success, otherwise error code | |
2901 | */ | |
2902 | int ksm_disable_merge_any(struct mm_struct *mm) | |
2903 | { | |
2904 | int err; | |
2905 | ||
2906 | if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) | |
2907 | return 0; | |
2908 | ||
2909 | err = ksm_del_vmas(mm); | |
2910 | if (err) { | |
2911 | ksm_add_vmas(mm); | |
2912 | return err; | |
2913 | } | |
2914 | ||
2915 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); | |
2916 | return 0; | |
2917 | } | |
2918 | ||
2c281f54 DH |
2919 | int ksm_disable(struct mm_struct *mm) |
2920 | { | |
2921 | mmap_assert_write_locked(mm); | |
2922 | ||
2923 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) | |
2924 | return 0; | |
2925 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) | |
2926 | return ksm_disable_merge_any(mm); | |
2927 | return ksm_del_vmas(mm); | |
2928 | } | |
2929 | ||
f8af4da3 HD |
2930 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
2931 | unsigned long end, int advice, unsigned long *vm_flags) | |
2932 | { | |
2933 | struct mm_struct *mm = vma->vm_mm; | |
d952b791 | 2934 | int err; |
f8af4da3 HD |
2935 | |
2936 | switch (advice) { | |
2937 | case MADV_MERGEABLE: | |
d7597f59 | 2938 | if (vma->vm_flags & VM_MERGEABLE) |
e1fb4a08 | 2939 | return 0; |
d7597f59 | 2940 | if (!vma_ksm_compatible(vma)) |
74a04967 | 2941 | return 0; |
cc2383ec | 2942 | |
d952b791 HD |
2943 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
2944 | err = __ksm_enter(mm); | |
2945 | if (err) | |
2946 | return err; | |
2947 | } | |
f8af4da3 HD |
2948 | |
2949 | *vm_flags |= VM_MERGEABLE; | |
2950 | break; | |
2951 | ||
2952 | case MADV_UNMERGEABLE: | |
2953 | if (!(*vm_flags & VM_MERGEABLE)) | |
2954 | return 0; /* just ignore the advice */ | |
2955 | ||
d952b791 | 2956 | if (vma->anon_vma) { |
49b06385 | 2957 | err = unmerge_ksm_pages(vma, start, end, true); |
d952b791 HD |
2958 | if (err) |
2959 | return err; | |
2960 | } | |
f8af4da3 HD |
2961 | |
2962 | *vm_flags &= ~VM_MERGEABLE; | |
2963 | break; | |
2964 | } | |
2965 | ||
2966 | return 0; | |
2967 | } | |
33cf1707 | 2968 | EXPORT_SYMBOL_GPL(ksm_madvise); |
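ksm_madvise() is normally reached through the madvise(2) syscall. A minimal per-range sketch (the mapping size and fill pattern are illustrative, not significant):

```c
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;	/* illustrative size only */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);	/* identical pages: merge candidates */

	/* Sets VM_MERGEABLE on the covering VMA(s); the first such call
	 * in a process registers the mm with ksmd via __ksm_enter(). */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("MADV_MERGEABLE");	/* e.g. EINVAL if CONFIG_KSM=n */

	/* Later, break COW on any merged pages and clear the advice: */
	if (madvise(buf, len, MADV_UNMERGEABLE))
		perror("MADV_UNMERGEABLE");
	return 0;
}
```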
f8af4da3 HD |
2969 | |
2970 | int __ksm_enter(struct mm_struct *mm) | |
2971 | { | |
21fbd591 | 2972 | struct ksm_mm_slot *mm_slot; |
58730ab6 | 2973 | struct mm_slot *slot; |
6e158384 HD |
2974 | int needs_wakeup; |
2975 | ||
58730ab6 | 2976 | mm_slot = mm_slot_alloc(mm_slot_cache); |
31dbd01f IE |
2977 | if (!mm_slot) |
2978 | return -ENOMEM; | |
2979 | ||
58730ab6 QZ |
2980 | slot = &mm_slot->slot; |
2981 | ||
6e158384 | 2982 | /* Check ksm_run too? Would need tighter locking */ |
58730ab6 | 2983 | needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); |
6e158384 | 2984 | |
31dbd01f | 2985 | spin_lock(&ksm_mmlist_lock); |
58730ab6 | 2986 | mm_slot_insert(mm_slots_hash, mm, slot); |
31dbd01f | 2987 | /* |
cbf86cfe HD |
2988 | * When KSM_RUN_MERGE (or KSM_RUN_STOP), |
2989 | * insert just behind the scanning cursor, to let the area settle | |
31dbd01f IE |
2990 | * down a little; when fork is followed by immediate exec, we don't |
2991 | * want ksmd to waste time setting up and tearing down an rmap_list. | |
cbf86cfe HD |
2992 | * |
2993 | * But when KSM_RUN_UNMERGE, it's important to insert ahead of its | |
2994 | * scanning cursor, otherwise KSM pages in newly forked mms will be | |
2995 | * missed: then we might as well insert at the end of the list. | |
31dbd01f | 2996 | */ |
cbf86cfe | 2997 | if (ksm_run & KSM_RUN_UNMERGE) |
58730ab6 | 2998 | list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); |
cbf86cfe | 2999 | else |
58730ab6 | 3000 | list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); |
31dbd01f IE |
3001 | spin_unlock(&ksm_mmlist_lock); |
3002 | ||
f8af4da3 | 3003 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
f1f10076 | 3004 | mmgrab(mm); |
6e158384 HD |
3005 | |
3006 | if (needs_wakeup) | |
3007 | wake_up_interruptible(&ksm_thread_wait); | |
3008 | ||
739100c8 | 3009 | trace_ksm_enter(mm); |
f8af4da3 HD |
3010 | return 0; |
3011 | } | |
3012 | ||
1c2fb7a4 | 3013 | void __ksm_exit(struct mm_struct *mm) |
f8af4da3 | 3014 | { |
21fbd591 | 3015 | struct ksm_mm_slot *mm_slot; |
58730ab6 | 3016 | struct mm_slot *slot; |
9ba69294 | 3017 | int easy_to_free = 0; |
cd551f97 | 3018 | |
31dbd01f | 3019 | /* |
9ba69294 HD |
3020 | * This process is exiting: if it's straightforward (as is the |
3021 | * case when ksmd was never running), free mm_slot immediately. | |
3022 | * But if it's at the cursor or has rmap_items linked to it, use | |
c1e8d7c6 | 3023 | * mmap_lock to synchronize with any break_cows before pagetables |
9ba69294 HD |
3024 | * are freed, and leave the mm_slot on the list for ksmd to free. |
3025 | * Beware: ksm may already have noticed it exiting and freed the slot. | |
31dbd01f | 3026 | */ |
9ba69294 | 3027 | |
cd551f97 | 3028 | spin_lock(&ksm_mmlist_lock); |
58730ab6 QZ |
3029 | slot = mm_slot_lookup(mm_slots_hash, mm); |
3030 | mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); | |
9ba69294 | 3031 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
6514d511 | 3032 | if (!mm_slot->rmap_list) { |
58730ab6 QZ |
3033 | hash_del(&slot->hash); |
3034 | list_del(&slot->mm_node); | |
9ba69294 HD |
3035 | easy_to_free = 1; |
3036 | } else { | |
58730ab6 QZ |
3037 | list_move(&slot->mm_node, |
3038 | &ksm_scan.mm_slot->slot.mm_node); | |
9ba69294 | 3039 | } |
cd551f97 | 3040 | } |
cd551f97 HD |
3041 | spin_unlock(&ksm_mmlist_lock); |
3042 | ||
9ba69294 | 3043 | if (easy_to_free) { |
58730ab6 | 3044 | mm_slot_free(mm_slot_cache, mm_slot); |
d7597f59 | 3045 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
9ba69294 HD |
3046 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
3047 | mmdrop(mm); | |
3048 | } else if (mm_slot) { | |
d8ed45c5 ML |
3049 | mmap_write_lock(mm); |
3050 | mmap_write_unlock(mm); | |
9ba69294 | 3051 | } |
739100c8 SR |
3052 | |
3053 | trace_ksm_exit(mm); | |
31dbd01f IE |
3054 | } |
3055 | ||
96db66d9 | 3056 | struct folio *ksm_might_need_to_copy(struct folio *folio, |
1486fb50 | 3057 | struct vm_area_struct *vma, unsigned long addr) |
5ad64688 | 3058 | { |
96db66d9 | 3059 | struct page *page = folio_page(folio, 0); |
e05b3453 | 3060 | struct anon_vma *anon_vma = folio_anon_vma(folio); |
1486fb50 | 3061 | struct folio *new_folio; |
5ad64688 | 3062 | |
1486fb50 | 3063 | if (folio_test_large(folio)) |
96db66d9 | 3064 | return folio; |
1486fb50 KW |
3065 | |
3066 | if (folio_test_ksm(folio)) { | |
3067 | if (folio_stable_node(folio) && | |
cbf86cfe | 3068 | !(ksm_run & KSM_RUN_UNMERGE)) |
96db66d9 | 3069 | return folio; /* no need to copy it */ |
cbf86cfe | 3070 | } else if (!anon_vma) { |
96db66d9 | 3071 | return folio; /* no need to copy it */ |
1486fb50 | 3072 | } else if (folio->index == linear_page_index(vma, addr) && |
e1c63e11 | 3073 | anon_vma->root == vma->anon_vma->root) { |
96db66d9 | 3074 | return folio; /* still no need to copy it */ |
cbf86cfe | 3075 | } |
f985fc32 ML |
3076 | if (PageHWPoison(page)) |
3077 | return ERR_PTR(-EHWPOISON); | |
1486fb50 | 3078 | if (!folio_test_uptodate(folio)) |
96db66d9 | 3079 | return folio; /* let do_swap_page report the error */ |
cbf86cfe | 3080 | |
1486fb50 KW |
3081 | new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); |
3082 | if (new_folio && | |
3083 | mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { | |
3084 | folio_put(new_folio); | |
3085 | new_folio = NULL; | |
62fdb163 | 3086 | } |
1486fb50 | 3087 | if (new_folio) { |
96db66d9 MWO |
3088 | if (copy_mc_user_highpage(folio_page(new_folio, 0), page, |
3089 | addr, vma)) { | |
1486fb50 | 3090 | folio_put(new_folio); |
96db66d9 | 3091 | memory_failure_queue(folio_pfn(folio), 0); |
6b970599 KW |
3092 | return ERR_PTR(-EHWPOISON); |
3093 | } | |
1486fb50 KW |
3094 | folio_set_dirty(new_folio); |
3095 | __folio_mark_uptodate(new_folio); | |
3096 | __folio_set_locked(new_folio); | |
4d45c3af YY |
3097 | #ifdef CONFIG_SWAP |
3098 | count_vm_event(KSM_SWPIN_COPY); | |
3099 | #endif | |
5ad64688 HD |
3100 | } |
3101 | ||
96db66d9 | 3102 | return new_folio; |
5ad64688 HD |
3103 | } |
3104 | ||
6d4675e6 | 3105 | void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) |
e9995ef9 | 3106 | { |
21fbd591 QZ |
3107 | struct ksm_stable_node *stable_node; |
3108 | struct ksm_rmap_item *rmap_item; | |
e9995ef9 HD |
3109 | int search_new_forks = 0; |
3110 | ||
2f031c6f | 3111 | VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); |
9f32624b JK |
3112 | |
3113 | /* | |
3114 | * Rely on the page lock to protect against concurrent modifications | |
3115 | * to that page's node of the stable tree. | |
3116 | */ | |
2f031c6f | 3117 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
e9995ef9 | 3118 | |
2f031c6f | 3119 | stable_node = folio_stable_node(folio); |
e9995ef9 | 3120 | if (!stable_node) |
1df631ae | 3121 | return; |
e9995ef9 | 3122 | again: |
b67bfe0d | 3123 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
e9995ef9 | 3124 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
5beb4930 | 3125 | struct anon_vma_chain *vmac; |
e9995ef9 HD |
3126 | struct vm_area_struct *vma; |
3127 | ||
ad12695f | 3128 | cond_resched(); |
6d4675e6 MK |
3129 | if (!anon_vma_trylock_read(anon_vma)) { |
3130 | if (rwc->try_lock) { | |
3131 | rwc->contended = true; | |
3132 | return; | |
3133 | } | |
3134 | anon_vma_lock_read(anon_vma); | |
3135 | } | |
bf181b9f ML |
3136 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
3137 | 0, ULONG_MAX) { | |
1105a2fc JH |
3138 | unsigned long addr; |
3139 | ||
ad12695f | 3140 | cond_resched(); |
5beb4930 | 3141 | vma = vmac->vma; |
1105a2fc JH |
3142 | |
3143 | /* Ignore the stable/unstable/sqnr flags */ | |
cd7fae26 | 3144 | addr = rmap_item->address & PAGE_MASK; |
1105a2fc JH |
3145 | |
3146 | if (addr < vma->vm_start || addr >= vma->vm_end) | |
e9995ef9 HD |
3147 | continue; |
3148 | /* | |
3149 | * Initially we examine only the vma which covers this | |
3150 | * rmap_item; but later, if there is still work to do, | |
3151 | * we examine covering vmas in other mms: in case they | |
3152 | * were forked from the original since ksmd passed. | |
3153 | */ | |
3154 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | |
3155 | continue; | |
3156 | ||
0dd1c7bb JK |
3157 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
3158 | continue; | |
3159 | ||
2f031c6f | 3160 | if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { |
b6b19f25 | 3161 | anon_vma_unlock_read(anon_vma); |
1df631ae | 3162 | return; |
e9995ef9 | 3163 | } |
2f031c6f | 3164 | if (rwc->done && rwc->done(folio)) { |
0dd1c7bb | 3165 | anon_vma_unlock_read(anon_vma); |
1df631ae | 3166 | return; |
0dd1c7bb | 3167 | } |
e9995ef9 | 3168 | } |
b6b19f25 | 3169 | anon_vma_unlock_read(anon_vma); |
e9995ef9 HD |
3170 | } |
3171 | if (!search_new_forks++) | |
3172 | goto again; | |
e9995ef9 HD |
3173 | } |
3174 | ||
4248d008 LX |
3175 | #ifdef CONFIG_MEMORY_FAILURE |
3176 | /* | |
3177 | * Collect processes when the error hits a KSM page. |
3178 | */ | |
b650e1d2 MWO |
3179 | void collect_procs_ksm(struct folio *folio, struct page *page, |
3180 | struct list_head *to_kill, int force_early) | |
4248d008 LX |
3181 | { |
3182 | struct ksm_stable_node *stable_node; | |
3183 | struct ksm_rmap_item *rmap_item; | |
4248d008 LX |
3184 | struct vm_area_struct *vma; |
3185 | struct task_struct *tsk; | |
3186 | ||
3187 | stable_node = folio_stable_node(folio); | |
3188 | if (!stable_node) | |
3189 | return; | |
3190 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { | |
3191 | struct anon_vma *av = rmap_item->anon_vma; | |
3192 | ||
3193 | anon_vma_lock_read(av); | |
d256d1cd | 3194 | rcu_read_lock(); |
4248d008 LX |
3195 | for_each_process(tsk) { |
3196 | struct anon_vma_chain *vmac; | |
3197 | unsigned long addr; | |
3198 | struct task_struct *t = | |
3199 | task_early_kill(tsk, force_early); | |
3200 | if (!t) | |
3201 | continue; | |
3202 | anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, | |
3203 | ULONG_MAX) | |
3204 | { | |
3205 | vma = vmac->vma; | |
3206 | if (vma->vm_mm == t->mm) { | |
3207 | addr = rmap_item->address & PAGE_MASK; | |
3208 | add_to_kill_ksm(t, page, vma, to_kill, | |
3209 | addr); | |
3210 | } | |
3211 | } | |
3212 | } | |
d256d1cd | 3213 | rcu_read_unlock(); |
4248d008 LX |
3214 | anon_vma_unlock_read(av); |
3215 | } | |
3216 | } | |
3217 | #endif | |
3218 | ||
52629506 | 3219 | #ifdef CONFIG_MIGRATION |
19138349 | 3220 | void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) |
e9995ef9 | 3221 | { |
21fbd591 | 3222 | struct ksm_stable_node *stable_node; |
e9995ef9 | 3223 | |
19138349 MWO |
3224 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
3225 | VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); | |
3226 | VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); | |
e9995ef9 | 3227 | |
19138349 | 3228 | stable_node = folio_stable_node(folio); |
e9995ef9 | 3229 | if (stable_node) { |
19138349 MWO |
3230 | VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); |
3231 | stable_node->kpfn = folio_pfn(newfolio); | |
c8d6553b | 3232 | /* |
19138349 | 3233 | * newfolio->mapping was set in advance; now we need smp_wmb() |
c8d6553b | 3234 | * to make sure that the new stable_node->kpfn is visible |
79899cce | 3235 | * to ksm_get_folio() before it can see that folio->mapping |
19138349 | 3236 | * has gone stale (or that folio_test_swapcache has been cleared). |
c8d6553b HD |
3237 | */ |
3238 | smp_wmb(); | |
b8b0ff24 | 3239 | folio_set_stable_node(folio, NULL); |
e9995ef9 HD |
3240 | } |
3241 | } | |
3242 | #endif /* CONFIG_MIGRATION */ | |
3243 | ||
62b61f61 | 3244 | #ifdef CONFIG_MEMORY_HOTREMOVE |
ef4d43a8 HD |
3245 | static void wait_while_offlining(void) |
3246 | { | |
3247 | while (ksm_run & KSM_RUN_OFFLINE) { | |
3248 | mutex_unlock(&ksm_thread_mutex); | |
3249 | wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), | |
74316201 | 3250 | TASK_UNINTERRUPTIBLE); |
ef4d43a8 HD |
3251 | mutex_lock(&ksm_thread_mutex); |
3252 | } | |
3253 | } | |
3254 | ||
21fbd591 | 3255 | static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, |
2c653d0e AA |
3256 | unsigned long start_pfn, |
3257 | unsigned long end_pfn) | |
3258 | { | |
3259 | if (stable_node->kpfn >= start_pfn && | |
3260 | stable_node->kpfn < end_pfn) { | |
3261 | /* | |
79899cce | 3262 | * Don't ksm_get_folio, page has already gone: |
2c653d0e AA |
3263 | * which is why we keep kpfn instead of page* |
3264 | */ | |
3265 | remove_node_from_stable_tree(stable_node); | |
3266 | return true; | |
3267 | } | |
3268 | return false; | |
3269 | } | |
3270 | ||
21fbd591 | 3271 | static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, |
2c653d0e AA |
3272 | unsigned long start_pfn, |
3273 | unsigned long end_pfn, | |
3274 | struct rb_root *root) | |
3275 | { | |
21fbd591 | 3276 | struct ksm_stable_node *dup; |
2c653d0e AA |
3277 | struct hlist_node *hlist_safe; |
3278 | ||
3279 | if (!is_stable_node_chain(stable_node)) { | |
3280 | VM_BUG_ON(is_stable_node_dup(stable_node)); | |
3281 | return stable_node_dup_remove_range(stable_node, start_pfn, | |
3282 | end_pfn); | |
3283 | } | |
3284 | ||
3285 | hlist_for_each_entry_safe(dup, hlist_safe, | |
3286 | &stable_node->hlist, hlist_dup) { | |
3287 | VM_BUG_ON(!is_stable_node_dup(dup)); | |
3288 | stable_node_dup_remove_range(dup, start_pfn, end_pfn); | |
3289 | } | |
3290 | if (hlist_empty(&stable_node->hlist)) { | |
3291 | free_stable_node_chain(stable_node, root); | |
3292 | return true; /* notify caller that tree was rebalanced */ | |
3293 | } else | |
3294 | return false; | |
3295 | } | |
3296 | ||
ee0ea59c HD |
3297 | static void ksm_check_stable_tree(unsigned long start_pfn, |
3298 | unsigned long end_pfn) | |
62b61f61 | 3299 | { |
21fbd591 | 3300 | struct ksm_stable_node *stable_node, *next; |
62b61f61 | 3301 | struct rb_node *node; |
90bd6fd3 | 3302 | int nid; |
62b61f61 | 3303 | |
ef53d16c HD |
3304 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
3305 | node = rb_first(root_stable_tree + nid); | |
ee0ea59c | 3306 | while (node) { |
21fbd591 | 3307 | stable_node = rb_entry(node, struct ksm_stable_node, node); |
2c653d0e AA |
3308 | if (stable_node_chain_remove_range(stable_node, |
3309 | start_pfn, end_pfn, | |
3310 | root_stable_tree + | |
3311 | nid)) | |
ef53d16c | 3312 | node = rb_first(root_stable_tree + nid); |
2c653d0e | 3313 | else |
ee0ea59c HD |
3314 | node = rb_next(node); |
3315 | cond_resched(); | |
90bd6fd3 | 3316 | } |
ee0ea59c | 3317 | } |
03640418 | 3318 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
4146d2d6 HD |
3319 | if (stable_node->kpfn >= start_pfn && |
3320 | stable_node->kpfn < end_pfn) | |
3321 | remove_node_from_stable_tree(stable_node); | |
3322 | cond_resched(); | |
3323 | } | |
62b61f61 HD |
3324 | } |
3325 | ||
3326 | static int ksm_memory_callback(struct notifier_block *self, | |
3327 | unsigned long action, void *arg) | |
3328 | { | |
3329 | struct memory_notify *mn = arg; | |
62b61f61 HD |
3330 | |
3331 | switch (action) { | |
3332 | case MEM_GOING_OFFLINE: | |
3333 | /* | |
ef4d43a8 HD |
3334 | * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() |
3335 | * and remove_all_stable_nodes() while memory is going offline: | |
3336 | * it is unsafe for them to touch the stable tree at this time. | |
3337 | * But unmerge_ksm_pages(), rmap lookups and other entry points | |
3338 | * which do not need the ksm_thread_mutex are all safe. | |
62b61f61 | 3339 | */ |
ef4d43a8 HD |
3340 | mutex_lock(&ksm_thread_mutex); |
3341 | ksm_run |= KSM_RUN_OFFLINE; | |
3342 | mutex_unlock(&ksm_thread_mutex); | |
62b61f61 HD |
3343 | break; |
3344 | ||
3345 | case MEM_OFFLINE: | |
3346 | /* | |
3347 | * Most of the work is done by page migration; but there might | |
3348 | * be a few stable_nodes left over, still pointing to struct | |
ee0ea59c | 3349 | * pages which have been offlined: prune those from the tree, |
79899cce | 3350 | * otherwise ksm_get_folio() might later try to access a |
ee0ea59c | 3351 | * non-existent struct page. |
62b61f61 | 3352 | */ |
ee0ea59c HD |
3353 | ksm_check_stable_tree(mn->start_pfn, |
3354 | mn->start_pfn + mn->nr_pages); | |
e4a9bc58 | 3355 | fallthrough; |
62b61f61 | 3356 | case MEM_CANCEL_OFFLINE: |
ef4d43a8 HD |
3357 | mutex_lock(&ksm_thread_mutex); |
3358 | ksm_run &= ~KSM_RUN_OFFLINE; | |
62b61f61 | 3359 | mutex_unlock(&ksm_thread_mutex); |
ef4d43a8 HD |
3360 | |
3361 | smp_mb(); /* wake_up_bit advises this */ | |
3362 | wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); | |
62b61f61 HD |
3363 | break; |
3364 | } | |
3365 | return NOTIFY_OK; | |
3366 | } | |
ef4d43a8 HD |
3367 | #else |
3368 | static void wait_while_offlining(void) | |
3369 | { | |
3370 | } | |
62b61f61 HD |
3371 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
3372 | ||
d21077fb SR |
3373 | #ifdef CONFIG_PROC_FS |
3374 | long ksm_process_profit(struct mm_struct *mm) | |
3375 | { | |
c2dc78b8 | 3376 | return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - |
d21077fb SR |
3377 | mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); |
3378 | } | |
3379 | #endif /* CONFIG_PROC_FS */ | |
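ksm_process_profit() feeds the per-process ksm_stat file in procfs. The exact set of fields exposed varies across kernel versions, so the sketch below simply dumps whatever the running kernel provides:

```c
#include <stdio.h>

/* Print /proc/self/ksm_stat; on kernels that expose it, the
 * ksm_process_profit line is the value computed above: merged and
 * zero pages credited at PAGE_SIZE each, rmap_item overhead debited. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/ksm_stat", "r");

	if (!f) {
		perror("/proc/self/ksm_stat");	/* needs CONFIG_PROC_FS */
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
```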
3380 | ||
2ffd8679 HD |
3381 | #ifdef CONFIG_SYSFS |
3382 | /* | |
3383 | * This all compiles without CONFIG_SYSFS, but is a waste of space. | |
3384 | */ | |
3385 | ||
31dbd01f IE |
3386 | #define KSM_ATTR_RO(_name) \ |
3387 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | |
3388 | #define KSM_ATTR(_name) \ | |
1bad2e5c | 3389 | static struct kobj_attribute _name##_attr = __ATTR_RW(_name) |
31dbd01f IE |
3390 | |
3391 | static ssize_t sleep_millisecs_show(struct kobject *kobj, | |
3392 | struct kobj_attribute *attr, char *buf) | |
3393 | { | |
ae7a927d | 3394 | return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); |
31dbd01f IE |
3395 | } |
3396 | ||
3397 | static ssize_t sleep_millisecs_store(struct kobject *kobj, | |
3398 | struct kobj_attribute *attr, | |
3399 | const char *buf, size_t count) | |
3400 | { | |
dfefd226 | 3401 | unsigned int msecs; |
31dbd01f IE |
3402 | int err; |
3403 | ||
dfefd226 AD |
3404 | err = kstrtouint(buf, 10, &msecs); |
3405 | if (err) | |
31dbd01f IE |
3406 | return -EINVAL; |
3407 | ||
3408 | ksm_thread_sleep_millisecs = msecs; | |
fcf9a0ef | 3409 | wake_up_interruptible(&ksm_iter_wait); |
31dbd01f IE |
3410 | |
3411 | return count; | |
3412 | } | |
3413 | KSM_ATTR(sleep_millisecs); | |
3414 | ||
3415 | static ssize_t pages_to_scan_show(struct kobject *kobj, | |
3416 | struct kobj_attribute *attr, char *buf) | |
3417 | { | |
ae7a927d | 3418 | return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); |
31dbd01f IE |
3419 | } |
3420 | ||
3421 | static ssize_t pages_to_scan_store(struct kobject *kobj, | |
3422 | struct kobj_attribute *attr, | |
3423 | const char *buf, size_t count) | |
3424 | { | |
dfefd226 | 3425 | unsigned int nr_pages; |
31dbd01f | 3426 | int err; |
31dbd01f | 3427 | |
4e5fa4f5 SR |
3428 | if (ksm_advisor != KSM_ADVISOR_NONE) |
3429 | return -EINVAL; | |
3430 | ||
dfefd226 AD |
3431 | err = kstrtouint(buf, 10, &nr_pages); |
3432 | if (err) | |
31dbd01f IE |
3433 | return -EINVAL; |
3434 | ||
3435 | ksm_thread_pages_to_scan = nr_pages; | |
3436 | ||
3437 | return count; | |
3438 | } | |
3439 | KSM_ATTR(pages_to_scan); | |
3440 | ||
3441 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, | |
3442 | char *buf) | |
3443 | { | |
ae7a927d | 3444 | return sysfs_emit(buf, "%lu\n", ksm_run); |
31dbd01f IE |
3445 | } |
3446 | ||
3447 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, | |
3448 | const char *buf, size_t count) | |
3449 | { | |
dfefd226 | 3450 | unsigned int flags; |
31dbd01f | 3451 | int err; |
31dbd01f | 3452 | |
dfefd226 AD |
3453 | err = kstrtouint(buf, 10, &flags); |
3454 | if (err) | |
31dbd01f IE |
3455 | return -EINVAL; |
3456 | if (flags > KSM_RUN_UNMERGE) | |
3457 | return -EINVAL; | |
3458 | ||
3459 | /* | |
3460 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. | |
3461 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, | |
d0f209f6 HD |
3462 | * breaking COW to free the pages_shared (but leaves mm_slots |
3463 | * on the list for when ksmd may be set running again). | |
31dbd01f IE |
3464 | */ |
3465 | ||
3466 | mutex_lock(&ksm_thread_mutex); | |
ef4d43a8 | 3467 | wait_while_offlining(); |
31dbd01f IE |
3468 | if (ksm_run != flags) { |
3469 | ksm_run = flags; | |
d952b791 | 3470 | if (flags & KSM_RUN_UNMERGE) { |
e1e12d2f | 3471 | set_current_oom_origin(); |
d952b791 | 3472 | err = unmerge_and_remove_all_rmap_items(); |
e1e12d2f | 3473 | clear_current_oom_origin(); |
d952b791 HD |
3474 | if (err) { |
3475 | ksm_run = KSM_RUN_STOP; | |
3476 | count = err; | |
3477 | } | |
3478 | } | |
31dbd01f IE |
3479 | } |
3480 | mutex_unlock(&ksm_thread_mutex); | |
3481 | ||
3482 | if (flags & KSM_RUN_MERGE) | |
3483 | wake_up_interruptible(&ksm_thread_wait); | |
3484 | ||
3485 | return count; | |
3486 | } | |
3487 | KSM_ATTR(run); | |
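run_store() is what a plain `echo` into sysfs ends up calling. A minimal sketch of driving it from C (root privileges assumed; the path is the standard /sys/kernel/mm/ksm/run):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "1" = KSM_RUN_MERGE (start ksmd), "0" = KSM_RUN_STOP,
	 * "2" = KSM_RUN_UNMERGE (stop and unmerge everything). */
	int fd = open("/sys/kernel/mm/ksm/run", O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("/sys/kernel/mm/ksm/run");
		return 1;
	}
	close(fd);
	return 0;
}
```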
3488 | ||
90bd6fd3 PH |
3489 | #ifdef CONFIG_NUMA |
3490 | static ssize_t merge_across_nodes_show(struct kobject *kobj, | |
ae7a927d | 3491 | struct kobj_attribute *attr, char *buf) |
90bd6fd3 | 3492 | { |
ae7a927d | 3493 | return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); |
90bd6fd3 PH |
3494 | } |
3495 | ||
3496 | static ssize_t merge_across_nodes_store(struct kobject *kobj, | |
3497 | struct kobj_attribute *attr, | |
3498 | const char *buf, size_t count) | |
3499 | { | |
3500 | int err; | |
3501 | unsigned long knob; | |
3502 | ||
3503 | err = kstrtoul(buf, 10, &knob); | |
3504 | if (err) | |
3505 | return err; | |
3506 | if (knob > 1) | |
3507 | return -EINVAL; | |
3508 | ||
3509 | mutex_lock(&ksm_thread_mutex); | |
ef4d43a8 | 3510 | wait_while_offlining(); |
90bd6fd3 | 3511 | if (ksm_merge_across_nodes != knob) { |
cbf86cfe | 3512 | if (ksm_pages_shared || remove_all_stable_nodes()) |
90bd6fd3 | 3513 | err = -EBUSY; |
ef53d16c HD |
3514 | else if (root_stable_tree == one_stable_tree) { |
3515 | struct rb_root *buf; | |
3516 | /* | |
3517 | * This is the first time that we switch away from the | |
3518 | * default of merging across nodes: must now allocate | |
3519 | * a buffer to hold as many roots as may be needed. | |
3520 | * Allocate stable and unstable together: | |
3521 | * MAXSMP NODES_SHIFT 10 will use 16kB. | |
3522 | */ | |
bafe1e14 JP |
3523 | buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), |
3524 | GFP_KERNEL); | |
ef53d16c HD |
3525 | /* Rely on RB_ROOT being a NULL (all-zero) root, as kcalloc returns */ |
3526 | if (!buf) | |
3527 | err = -ENOMEM; | |
3528 | else { | |
3529 | root_stable_tree = buf; | |
3530 | root_unstable_tree = buf + nr_node_ids; | |
3531 | /* Stable tree is empty but not the unstable */ | |
3532 | root_unstable_tree[0] = one_unstable_tree[0]; | |
3533 | } | |
3534 | } | |
3535 | if (!err) { | |
90bd6fd3 | 3536 | ksm_merge_across_nodes = knob; |
ef53d16c HD |
3537 | ksm_nr_node_ids = knob ? 1 : nr_node_ids; |
3538 | } | |
90bd6fd3 PH |
3539 | } |
3540 | mutex_unlock(&ksm_thread_mutex); | |
3541 | ||
3542 | return err ? err : count; | |
3543 | } | |
3544 | KSM_ATTR(merge_across_nodes); | |
3545 | #endif | |
3546 | ||
e86c59b1 | 3547 | static ssize_t use_zero_pages_show(struct kobject *kobj, |
ae7a927d | 3548 | struct kobj_attribute *attr, char *buf) |
e86c59b1 | 3549 | { |
ae7a927d | 3550 | return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); |
e86c59b1 CI |
3551 | } |
3552 | static ssize_t use_zero_pages_store(struct kobject *kobj, | |
3553 | struct kobj_attribute *attr, | |
3554 | const char *buf, size_t count) | |
3555 | { | |
3556 | int err; | |
3557 | bool value; | |
3558 | ||
3559 | err = kstrtobool(buf, &value); | |
3560 | if (err) | |
3561 | return -EINVAL; | |
3562 | ||
3563 | ksm_use_zero_pages = value; | |
3564 | ||
3565 | return count; | |
3566 | } | |
3567 | KSM_ATTR(use_zero_pages); | |
3568 | ||
2c653d0e AA |
3569 | static ssize_t max_page_sharing_show(struct kobject *kobj, |
3570 | struct kobj_attribute *attr, char *buf) | |
3571 | { | |
ae7a927d | 3572 | return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); |
2c653d0e AA |
3573 | } |
3574 | ||
3575 | static ssize_t max_page_sharing_store(struct kobject *kobj, | |
3576 | struct kobj_attribute *attr, | |
3577 | const char *buf, size_t count) | |
3578 | { | |
3579 | int err; | |
3580 | int knob; | |
3581 | ||
3582 | err = kstrtoint(buf, 10, &knob); | |
3583 | if (err) | |
3584 | return err; | |
3585 | /* | |
3586 | * When a KSM page is created it is shared by 2 mappings. This | |
3587 | * being a signed comparison, it implicitly verifies it's not | |
3588 | * negative. | |
3589 | */ | |
3590 | if (knob < 2) | |
3591 | return -EINVAL; | |
3592 | ||
3593 | if (READ_ONCE(ksm_max_page_sharing) == knob) | |
3594 | return count; | |
3595 | ||
3596 | mutex_lock(&ksm_thread_mutex); | |
3597 | wait_while_offlining(); | |
3598 | if (ksm_max_page_sharing != knob) { | |
3599 | if (ksm_pages_shared || remove_all_stable_nodes()) | |
3600 | err = -EBUSY; | |
3601 | else | |
3602 | ksm_max_page_sharing = knob; | |
3603 | } | |
3604 | mutex_unlock(&ksm_thread_mutex); | |
3605 | ||
3606 | return err ? err : count; | |
3607 | } | |
3608 | KSM_ATTR(max_page_sharing); | |
3609 | ||
b348b5fe SR |
3610 | static ssize_t pages_scanned_show(struct kobject *kobj, |
3611 | struct kobj_attribute *attr, char *buf) | |
3612 | { | |
3613 | return sysfs_emit(buf, "%lu\n", ksm_pages_scanned); | |
3614 | } | |
3615 | KSM_ATTR_RO(pages_scanned); | |
3616 | ||
b4028260 HD |
3617 | static ssize_t pages_shared_show(struct kobject *kobj, |
3618 | struct kobj_attribute *attr, char *buf) | |
3619 | { | |
ae7a927d | 3620 | return sysfs_emit(buf, "%lu\n", ksm_pages_shared); |
b4028260 HD |
3621 | } |
3622 | KSM_ATTR_RO(pages_shared); | |
3623 | ||
3624 | static ssize_t pages_sharing_show(struct kobject *kobj, | |
3625 | struct kobj_attribute *attr, char *buf) | |
3626 | { | |
ae7a927d | 3627 | return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); |
b4028260 HD |
3628 | } |
3629 | KSM_ATTR_RO(pages_sharing); | |
3630 | ||
473b0ce4 HD |
3631 | static ssize_t pages_unshared_show(struct kobject *kobj, |
3632 | struct kobj_attribute *attr, char *buf) | |
3633 | { | |
ae7a927d | 3634 | return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); |
473b0ce4 HD |
3635 | } |
3636 | KSM_ATTR_RO(pages_unshared); | |
3637 | ||
3638 | static ssize_t pages_volatile_show(struct kobject *kobj, | |
3639 | struct kobj_attribute *attr, char *buf) | |
3640 | { | |
3641 | long ksm_pages_volatile; | |
3642 | ||
3643 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared | |
3644 | - ksm_pages_sharing - ksm_pages_unshared; | |
3645 | /* | |
3646 | * It was not worth any locking to calculate that statistic, | |
3647 | * but it might therefore sometimes be negative: conceal that. | |
3648 | */ | |
3649 | if (ksm_pages_volatile < 0) | |
3650 | ksm_pages_volatile = 0; | |
ae7a927d | 3651 | return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); |
473b0ce4 HD |
3652 | } |
3653 | KSM_ATTR_RO(pages_volatile); | |
3654 | ||
e5a68991 SR |
3655 | static ssize_t pages_skipped_show(struct kobject *kobj, |
3656 | struct kobj_attribute *attr, char *buf) | |
3657 | { | |
3658 | return sysfs_emit(buf, "%lu\n", ksm_pages_skipped); | |
3659 | } | |
3660 | KSM_ATTR_RO(pages_skipped); | |
3661 | ||
e2942062 | 3662 | static ssize_t ksm_zero_pages_show(struct kobject *kobj, |
3663 | struct kobj_attribute *attr, char *buf) | |
3664 | { | |
c2dc78b8 | 3665 | return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); |
e2942062 | 3666 | } |
3667 | KSM_ATTR_RO(ksm_zero_pages); | |
3668 | ||
d21077fb SR |
3669 | static ssize_t general_profit_show(struct kobject *kobj, |
3670 | struct kobj_attribute *attr, char *buf) | |
3671 | { | |
3672 | long general_profit; | |
3673 | ||
c2dc78b8 | 3674 | general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - |
d21077fb SR |
3675 | ksm_rmap_items * sizeof(struct ksm_rmap_item); |
3676 | ||
3677 | return sysfs_emit(buf, "%ld\n", general_profit); | |
3678 | } | |
3679 | KSM_ATTR_RO(general_profit); | |
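All of the counters above live under /sys/kernel/mm/ksm/. A small reader sketch, restating the profit formula from general_profit_show() in a comment (the rmap_item total is recoverable from the four page counters via the pages_volatile identity above):

```c
#include <stdio.h>

/* Read one counter from /sys/kernel/mm/ksm/<name>; -1 on failure. */
static long ksm_read(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* general_profit = (pages_sharing + ksm_zero_pages) * PAGE_SIZE
	 *                  - rmap_items * sizeof(struct ksm_rmap_item),
	 * where rmap_items = pages_shared + pages_sharing
	 *                  + pages_unshared + pages_volatile. */
	printf("pages_sharing:  %ld\n", ksm_read("pages_sharing"));
	printf("ksm_zero_pages: %ld\n", ksm_read("ksm_zero_pages"));
	printf("general_profit: %ld bytes\n", ksm_read("general_profit"));
	return 0;
}
```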
3680 | ||
2c653d0e AA |
3681 | static ssize_t stable_node_dups_show(struct kobject *kobj, |
3682 | struct kobj_attribute *attr, char *buf) | |
3683 | { | |
ae7a927d | 3684 | return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); |
2c653d0e AA |
3685 | } |
3686 | KSM_ATTR_RO(stable_node_dups); | |
3687 | ||
3688 | static ssize_t stable_node_chains_show(struct kobject *kobj, | |
3689 | struct kobj_attribute *attr, char *buf) | |
3690 | { | |
ae7a927d | 3691 | return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); |
2c653d0e AA |
3692 | } |
3693 | KSM_ATTR_RO(stable_node_chains); | |
3694 | ||
3695 | static ssize_t | |
3696 | stable_node_chains_prune_millisecs_show(struct kobject *kobj, | |
3697 | struct kobj_attribute *attr, | |
3698 | char *buf) | |
3699 | { | |
ae7a927d | 3700 | return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); |
2c653d0e AA |
3701 | } |
3702 | ||
3703 | static ssize_t | |
3704 | stable_node_chains_prune_millisecs_store(struct kobject *kobj, | |
3705 | struct kobj_attribute *attr, | |
3706 | const char *buf, size_t count) | |
3707 | { | |
584ff0df | 3708 | unsigned int msecs; |
2c653d0e AA |
3709 | int err; |
3710 | ||
584ff0df ZB |
3711 | err = kstrtouint(buf, 10, &msecs); |
3712 | if (err) | |
2c653d0e AA |
3713 | return -EINVAL; |
3714 | ||
3715 | ksm_stable_node_chains_prune_millisecs = msecs; | |
3716 | ||
3717 | return count; | |
3718 | } | |
3719 | KSM_ATTR(stable_node_chains_prune_millisecs); | |
3720 | ||
473b0ce4 HD |
3721 | static ssize_t full_scans_show(struct kobject *kobj, |
3722 | struct kobj_attribute *attr, char *buf) | |
3723 | { | |
ae7a927d | 3724 | return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); |
473b0ce4 HD |
3725 | } |
3726 | KSM_ATTR_RO(full_scans); | |
3727 | ||
5e924ff5 SR |
3728 | static ssize_t smart_scan_show(struct kobject *kobj, |
3729 | struct kobj_attribute *attr, char *buf) | |
3730 | { | |
3731 | return sysfs_emit(buf, "%u\n", ksm_smart_scan); | |
3732 | } | |
3733 | ||
3734 | static ssize_t smart_scan_store(struct kobject *kobj, | |
3735 | struct kobj_attribute *attr, | |
3736 | const char *buf, size_t count) | |
3737 | { | |
3738 | int err; | |
3739 | bool value; | |
3740 | ||
3741 | err = kstrtobool(buf, &value); | |
3742 | if (err) | |
3743 | return -EINVAL; | |
3744 | ||
3745 | ksm_smart_scan = value; | |
3746 | return count; | |
3747 | } | |
3748 | KSM_ATTR(smart_scan); | |
3749 | ||
66790e9a SR |
3750 | static ssize_t advisor_mode_show(struct kobject *kobj, |
3751 | struct kobj_attribute *attr, char *buf) | |
3752 | { | |
3753 | const char *output; | |
3754 | ||
3755 | if (ksm_advisor == KSM_ADVISOR_NONE) | |
3756 | output = "[none] scan-time"; | |
3757 | else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) | |
3758 | output = "none [scan-time]"; | |
3759 | ||
3760 | return sysfs_emit(buf, "%s\n", output); | |
3761 | } | |
3762 | ||
3763 | static ssize_t advisor_mode_store(struct kobject *kobj, | |
3764 | struct kobj_attribute *attr, const char *buf, | |
3765 | size_t count) | |
3766 | { | |
3767 | enum ksm_advisor_type curr_advisor = ksm_advisor; | |
3768 | ||
3769 | if (sysfs_streq("scan-time", buf)) | |
3770 | ksm_advisor = KSM_ADVISOR_SCAN_TIME; | |
3771 | else if (sysfs_streq("none", buf)) | |
3772 | ksm_advisor = KSM_ADVISOR_NONE; | |
3773 | else | |
3774 | return -EINVAL; | |
3775 | ||
3776 | /* Set advisor default values */ | |
3777 | if (curr_advisor != ksm_advisor) | |
3778 | set_advisor_defaults(); | |
3779 | ||
3780 | return count; | |
3781 | } | |
3782 | KSM_ATTR(advisor_mode); | |
3783 | ||
3784 | static ssize_t advisor_max_cpu_show(struct kobject *kobj, | |
3785 | struct kobj_attribute *attr, char *buf) | |
3786 | { | |
3787 | return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu); | |
3788 | } | |
3789 | ||
3790 | static ssize_t advisor_max_cpu_store(struct kobject *kobj, | |
3791 | struct kobj_attribute *attr, | |
3792 | const char *buf, size_t count) | |
3793 | { | |
3794 | int err; | |
3795 | unsigned long value; | |
3796 | ||
3797 | err = kstrtoul(buf, 10, &value); | |
3798 | if (err) | |
3799 | return -EINVAL; | |
3800 | ||
3801 | ksm_advisor_max_cpu = value; | |
3802 | return count; | |
3803 | } | |
3804 | KSM_ATTR(advisor_max_cpu); | |
3805 | ||
3806 | static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj, | |
3807 | struct kobj_attribute *attr, char *buf) | |
3808 | { | |
3809 | return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan); | |
3810 | } | |
3811 | ||
3812 | static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj, | |
3813 | struct kobj_attribute *attr, | |
3814 | const char *buf, size_t count) | |
3815 | { | |
3816 | int err; | |
3817 | unsigned long value; | |
3818 | ||
3819 | err = kstrtoul(buf, 10, &value); | |
3820 | if (err) | |
3821 | return -EINVAL; | |
3822 | ||
3823 | ksm_advisor_min_pages_to_scan = value; | |
3824 | return count; | |
3825 | } | |
3826 | KSM_ATTR(advisor_min_pages_to_scan); | |
3827 | ||
3828 | static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj, | |
3829 | struct kobj_attribute *attr, char *buf) | |
3830 | { | |
3831 | return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan); | |
3832 | } | |
3833 | ||
3834 | static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj, | |
3835 | struct kobj_attribute *attr, | |
3836 | const char *buf, size_t count) | |
3837 | { | |
3838 | int err; | |
3839 | unsigned long value; | |
3840 | ||
3841 | err = kstrtoul(buf, 10, &value); | |
3842 | if (err) | |
3843 | return -EINVAL; | |
3844 | ||
3845 | ksm_advisor_max_pages_to_scan = value; | |
3846 | return count; | |
3847 | } | |
3848 | KSM_ATTR(advisor_max_pages_to_scan); | |
3849 | ||
3850 | static ssize_t advisor_target_scan_time_show(struct kobject *kobj, | |
3851 | struct kobj_attribute *attr, char *buf) | |
3852 | { | |
3853 | return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time); | |
3854 | } | |
3855 | ||
3856 | static ssize_t advisor_target_scan_time_store(struct kobject *kobj, | |
3857 | struct kobj_attribute *attr, | |
3858 | const char *buf, size_t count) | |
3859 | { | |
3860 | int err; | |
3861 | unsigned long value; | |
3862 | ||
3863 | err = kstrtoul(buf, 10, &value); | |
3864 | if (err) | |
3865 | return -EINVAL; | |
3866 | if (value < 1) | |
3867 | return -EINVAL; | |
3868 | ||
3869 | ksm_advisor_target_scan_time = value; | |
3870 | return count; | |
3871 | } | |
3872 | KSM_ATTR(advisor_target_scan_time); | |
3873 | ||
31dbd01f IE |
3874 | static struct attribute *ksm_attrs[] = { |
3875 | &sleep_millisecs_attr.attr, | |
3876 | &pages_to_scan_attr.attr, | |
3877 | &run_attr.attr, | |
b348b5fe | 3878 | &pages_scanned_attr.attr, |
b4028260 HD |
3879 | &pages_shared_attr.attr, |
3880 | &pages_sharing_attr.attr, | |
473b0ce4 HD |
3881 | &pages_unshared_attr.attr, |
3882 | &pages_volatile_attr.attr, | |
e5a68991 | 3883 | &pages_skipped_attr.attr, |
e2942062 | 3884 | &ksm_zero_pages_attr.attr, |
473b0ce4 | 3885 | &full_scans_attr.attr, |
90bd6fd3 PH |
3886 | #ifdef CONFIG_NUMA |
3887 | &merge_across_nodes_attr.attr, | |
3888 | #endif | |
2c653d0e AA |
3889 | &max_page_sharing_attr.attr, |
3890 | &stable_node_chains_attr.attr, | |
3891 | &stable_node_dups_attr.attr, | |
3892 | &stable_node_chains_prune_millisecs_attr.attr, | |
e86c59b1 | 3893 | &use_zero_pages_attr.attr, |
d21077fb | 3894 | &general_profit_attr.attr, |
5e924ff5 | 3895 | &smart_scan_attr.attr, |
66790e9a SR |
3896 | &advisor_mode_attr.attr, |
3897 | &advisor_max_cpu_attr.attr, | |
3898 | &advisor_min_pages_to_scan_attr.attr, | |
3899 | &advisor_max_pages_to_scan_attr.attr, | |
3900 | &advisor_target_scan_time_attr.attr, | |
31dbd01f IE |
3901 | NULL, |
3902 | }; | |
3903 | ||
f907c26a | 3904 | static const struct attribute_group ksm_attr_group = { |
31dbd01f IE |
3905 | .attrs = ksm_attrs, |
3906 | .name = "ksm", | |
3907 | }; | |
2ffd8679 | 3908 | #endif /* CONFIG_SYSFS */ |
31dbd01f IE |
3909 | |
3910 | static int __init ksm_init(void) | |
3911 | { | |
3912 | struct task_struct *ksm_thread; | |
3913 | int err; | |
3914 | ||
e86c59b1 CI |
3915 | /* The correct value depends on page size and endianness */ |
3916 | zero_checksum = calc_checksum(ZERO_PAGE(0)); | |
3917 | /* Default to false for backwards compatibility */ | |
3918 | ksm_use_zero_pages = false; | |
3919 | ||
31dbd01f IE |
3920 | err = ksm_slab_init(); |
3921 | if (err) | |
3922 | goto out; | |
3923 | ||
31dbd01f IE |
3924 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
3925 | if (IS_ERR(ksm_thread)) { | |
25acde31 | 3926 | pr_err("ksm: creating kthread failed\n"); |
31dbd01f | 3927 | err = PTR_ERR(ksm_thread); |
d9f8984c | 3928 | goto out_free; |
31dbd01f IE |
3929 | } |
3930 | ||
2ffd8679 | 3931 | #ifdef CONFIG_SYSFS |
31dbd01f IE |
3932 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
3933 | if (err) { | |
25acde31 | 3934 | pr_err("ksm: register sysfs failed\n"); |
2ffd8679 | 3935 | kthread_stop(ksm_thread); |
d9f8984c | 3936 | goto out_free; |
31dbd01f | 3937 | } |
c73602ad HD |
3938 | #else |
3939 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ | |
3940 | ||
2ffd8679 | 3941 | #endif /* CONFIG_SYSFS */ |
31dbd01f | 3942 | |
62b61f61 | 3943 | #ifdef CONFIG_MEMORY_HOTREMOVE |
ef4d43a8 | 3944 | /* There is no significance to this callback priority */ |
1eeaa4fd | 3945 | hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); |
62b61f61 | 3946 | #endif |
31dbd01f IE |
3947 | return 0; |
3948 | ||
d9f8984c | 3949 | out_free: |
31dbd01f IE |
3950 | ksm_slab_free(); |
3951 | out: | |
3952 | return err; | |
f8af4da3 | 3953 | } |
a64fb3cd | 3954 | subsys_initcall(ksm_init); |