// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
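
/*
 * An illustrative userspace sketch (not part of the original file): how a
 * program hands an area to ksmd for scanning, using only the documented
 * madvise(2) interface.  "len" is assumed to be a page-aligned size chosen
 * by the caller.
 *
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_MERGEABLE);
 */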

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

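/*
 * Illustrative example of the packing above (assuming a full scan count
 * of 5): an rmap_item inserted into the unstable tree stores
 * (addr & PAGE_MASK) | UNSTABLE_FLAG | (5 & SEQNR_MASK) in ->address,
 * and remove_rmap_item_from_tree() later recovers the scan age as
 * (unsigned char)(ksm_scan.seqnr - rmap_item->address).
 */
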
/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
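
/*
 * For example (illustrative only), KSM_KMEM_CACHE(mm_slot, 0) expands to:
 *
 *	kmem_cache_create("ksm_mm_slot", sizeof(struct mm_slot),
 *			  __alignof__(struct mm_slot), 0, NULL);
 */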

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct stable_node *dup,
					     struct stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	vm_fault_t ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr,
				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma, addr,
					      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
					      NULL);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes, with roots in
 * index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
						   struct rb_root *root)
{
	struct stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE;	/* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup.  The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes.  This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD.  Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping,
 * even while that page is on its way to being freed; but it is an
 * anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node,
				 enum get_ksm_page_flags flags)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same is in reuse_ksm_page() case; but if page is swapcache
	 * in migrate_page_move_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (flags == GET_KSM_PAGE_TRYLOCK) {
		if (!trylock_page(page)) {
			put_page(page);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == GET_KSM_PAGE_LOCK)
		lock_page(page);

	if (flags != GET_KSM_PAGE_NOLOCK) {
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

static inline struct stable_node *folio_stable_node(struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap().  Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!page_mapped(page)) {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_stable_node_chain(struct stable_node *stable_node,
				    struct rb_root *root)
{
	struct stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		mmap_read_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_atomic(addr);
	return checksum;
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
	int swapped;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	pvmw.address = page_address_in_vma(page, vma);
	if (pvmw.address == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				pvmw.address,
				pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
						mm_tlb_flush_pending(mm)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 *
		 * No need to notify as we are downgrading page table to read
		 * only not changing it to point to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);

		if (pte_protnone(entry))
			entry = pte_mkclean(pte_clear_savedwrite(entry));
		else
			entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = *pvmw.pte;
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma: vma that holds the pte pointing to page
 * @page: the page we are replacing by kpage
 * @kpage: the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *ptep;
	pte_t newpte;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
				addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}

	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
	if (!is_zero_pfn(page_to_pfn(kpage))) {
		get_page(kpage);
		page_add_anon_rmap(kpage, vma, addr, false);
		newpte = mk_pte(kpage, vma->vm_page_prot);
	} else {
		newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
					       vma->vm_page_prot));
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous.  We need to do proper accounting otherwise we
		 * will get wrong values in /proc, and a BUG message in dmesg
		 * when tearing down the mm.
		 */
		dec_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	/*
	 * No need to notify as we are replacing a read only page with another
	 * read only page with the same content.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, newpte);

	page_remove_rmap(page, vma, false);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;

	if (PageTransCompound(page)) {
		if (split_huge_page(page))
			goto out_unlock;
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			/*
			 * Page reclaim just frees a clean page with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!PageDirty(page))
				SetPageDirty(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

out_unlock:
	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_lock */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	mmap_read_unlock(mm);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}

static __always_inline
bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
{
	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
	/*
	 * Check that at least one mapping still exists, otherwise
	 * there's not much point to merge and share with this
	 * stable_node, as the underlying tree_page of the other
	 * sharer is going to be freed soon.
	 */
	return stable_node->rmap_hlist_len &&
		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
}

static __always_inline
bool is_page_sharing_candidate(struct stable_node *stable_node)
{
	return __is_page_sharing_candidate(stable_node, 0);
}

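/*
 * Worked example (illustrative, assuming the default ksm_max_page_sharing
 * of 256): a dup with rmap_hlist_len == 255 still passes
 * is_page_sharing_candidate() since 255 + 0 < 256, but fails
 * __is_page_sharing_candidate(dup, 1) since 255 + 1 is not < 256 -- the
 * stricter test used below when a found dup must be able to accept one
 * more merge in addition to the one already underway.
 */
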
static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
				    struct stable_node **_stable_node,
				    struct rb_root *root,
				    bool prune_stale_stable_nodes)
{
	struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
	struct hlist_node *hlist_safe;
	struct page *_tree_page, *tree_page = NULL;
	int nr = 0;
	int found_rmap_hlist_len;

	if (!prune_stale_stable_nodes ||
	    time_before(jiffies, stable_node->chain_prune_time +
			msecs_to_jiffies(
				ksm_stable_node_chains_prune_millisecs)))
		prune_stale_stable_nodes = false;
	else
		stable_node->chain_prune_time = jiffies;

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		cond_resched();
		/*
		 * We must walk all stable_node_dup to prune the stale
		 * stable nodes during lookup.
		 *
		 * get_ksm_page can drop the nodes from the
		 * stable_node->hlist if they point to freed pages
		 * (that's why we do a _safe walk). The "dup"
		 * stable_node parameter itself will be freed from
		 * under us if it returns NULL.
		 */
		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
		if (!_tree_page)
			continue;
		nr += 1;
		if (is_page_sharing_candidate(dup)) {
			if (!found ||
			    dup->rmap_hlist_len > found_rmap_hlist_len) {
				if (found)
					put_page(tree_page);
				found = dup;
				found_rmap_hlist_len = found->rmap_hlist_len;
				tree_page = _tree_page;

				/* skip put_page for found dup */
				if (!prune_stale_stable_nodes)
					break;
				continue;
			}
		}
		put_page(_tree_page);
	}

	if (found) {
		/*
		 * nr is counting all dups in the chain only if
		 * prune_stale_stable_nodes is true, otherwise we may
		 * break the loop at nr == 1 even if there are
		 * multiple entries.
		 */
		if (prune_stale_stable_nodes && nr == 1) {
			/*
			 * If there were more than one entry we would
			 * corrupt memory, so better BUG_ON now. In KSM
			 * context with no lock held it's not even
			 * fatal.
			 */
			BUG_ON(stable_node->hlist.first->next);

			/*
			 * There's just one entry and it is below the
			 * deduplication limit so drop the chain.
			 */
			rb_replace_node(&stable_node->node, &found->node,
					root);
			free_stable_node(stable_node);
			ksm_stable_node_chains--;
			ksm_stable_node_dups--;
			/*
			 * NOTE: the caller depends on the stable_node
			 * to be equal to stable_node_dup if the chain
			 * was collapsed.
			 */
			*_stable_node = found;
			/*
			 * Just for robustness, as stable_node is
			 * otherwise left as a stale pointer, the
			 * compiler shall optimize it away at build
			 * time.
			 */
			stable_node = NULL;
		} else if (stable_node->hlist.first != &found->hlist_dup &&
			   __is_page_sharing_candidate(found, 1)) {
			/*
			 * If the found stable_node dup can accept one
			 * more future merge (in addition to the one
			 * that is underway) and is not at the head of
			 * the chain, put it there so next search will
			 * be quicker in the !prune_stale_stable_nodes
			 * case.
			 *
			 * NOTE: it would be inaccurate to use nr > 1
			 * instead of checking the hlist.first pointer
			 * directly, because in the
			 * prune_stale_stable_nodes case "nr" isn't
			 * the position of the found dup in the chain,
			 * but the total number of dups in the chain.
			 */
			hlist_del(&found->hlist_dup);
			hlist_add_head(&found->hlist_dup,
				       &stable_node->hlist);
		}
	}

	*_stable_node_dup = found;
	return tree_page;
}

static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
					       struct rb_root *root)
{
	if (!is_stable_node_chain(stable_node))
		return stable_node;
	if (hlist_empty(&stable_node->hlist)) {
		free_stable_node_chain(stable_node, root);
		return NULL;
	}
	return hlist_entry(stable_node->hlist.first,
			   typeof(*stable_node), hlist_dup);
}

/*
 * Like for get_ksm_page, this function can free the *_stable_node and
 * *_stable_node_dup if the returned tree_page is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
 * stable_node_dup if the chain is collapsed (in which case
 * *_stable_node will be equal to *_stable_node_dup as if the chain
 * never existed). It's up to the caller to verify tree_page is not
 * NULL before dereferencing *_stable_node or *_stable_node_dup.
 *
 * *_stable_node_dup is really a second output parameter of this
 * function and will be overwritten in all cases, the caller doesn't
 * need to initialize it.
 */
static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
					struct stable_node **_stable_node,
					struct rb_root *root,
					bool prune_stale_stable_nodes)
{
	struct stable_node *stable_node = *_stable_node;

	if (!is_stable_node_chain(stable_node)) {
		if (is_page_sharing_candidate(stable_node)) {
			*_stable_node_dup = stable_node;
			return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
		}
		/*
		 * _stable_node_dup set to NULL means the stable_node
		 * reached the ksm_max_page_sharing limit.
		 */
		*_stable_node_dup = NULL;
		return NULL;
	}
	return stable_node_dup(_stable_node_dup, _stable_node, root,
			       prune_stale_stable_nodes);
}

static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
						struct stable_node **s_n,
						struct rb_root *root)
{
	return __stable_node_chain(s_n_d, s_n, root, true);
}

static __always_inline struct page *chain(struct stable_node **s_n_d,
					  struct stable_node *s_n,
					  struct rb_root *root)
{
	struct stable_node *old_stable_node = s_n;
	struct page *tree_page;

	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
	/* not pruning dups so s_n cannot have changed */
	VM_BUG_ON(s_n != old_stable_node);
	return tree_page;
}

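/*
 * A note on the two wrappers above (summarising the code that follows):
 * stable_tree_search() descends the tree with chain_prune(), which both
 * looks up the best dup and opportunistically prunes stale dups -- and may
 * collapse a single-dup chain, rewriting the caller's stable_node pointer.
 * stable_tree_insert() descends with chain(), which never mutates the
 * chain, so its caller can rely on the stable_node pointer staying valid
 * across the call.
 */
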
/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct page *stable_tree_search(struct page *page)
{
	int nid;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
	struct stable_node *page_node;

	page_node = page_stable_node(page);
	if (page_node && page_node->head != &migrate_nodes) {
		/* ksm page forked */
		get_page(page);
		return page;
	}

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_stable_tree + nid;
again:
	new = &root->rb_node;
	parent = NULL;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		stable_node_any = NULL;
		tree_page = chain_prune(&stable_node_dup, &stable_node, root);
		/*
		 * NOTE: stable_node may have been freed by
		 * chain_prune() if the returned stable_node_dup is
		 * not NULL. stable_node_dup may have been inserted in
		 * the rbtree instead as a regular stable_node (in
		 * order to collapse the stable_node chain if a single
		 * stable_node dup was found in it). In such case the
		 * stable_node is overwritten by the callee to point
		 * to the stable_node_dup that was collapsed in the
		 * stable rbtree and stable_node will be equal to
		 * stable_node_dup as if the chain never existed.
		 */
		if (!stable_node_dup) {
			/*
			 * Either all stable_node dups were full in
			 * this stable_node chain, or this chain was
			 * empty and should be rb_erased.
			 */
			stable_node_any = stable_node_dup_any(stable_node,
							      root);
			if (!stable_node_any) {
				/* rb_erase just ran */
				goto again;
			}
			/*
			 * Take the page of any of the stable_node dups
			 * in this stable_node chain to let the tree walk
			 * continue. All KSM pages belonging to the
			 * stable_node dups in a stable_node chain
			 * have the same content and they're
			 * write protected at all times. Any will work
			 * fine to continue the walk.
			 */
			tree_page = get_ksm_page(stable_node_any,
						 GET_KSM_PAGE_NOLOCK);
		}
		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(page, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			if (page_node) {
				VM_BUG_ON(page_node->head != &migrate_nodes);
				/*
				 * Test if the migrated page should be merged
				 * into a stable node dup. If the mapcount is
				 * 1 we can migrate it with another KSM page
				 * without adding it to the chain.
				 */
				if (page_mapcount(page) > 1)
					goto chain_append;
			}

			if (!stable_node_dup) {
				/*
				 * If the stable_node is a chain and
				 * we got a payload match in memcmp
				 * but we cannot merge the scanned
				 * page in any of the existing
				 * stable_node dups because they're
				 * all full, we need to wait for the
				 * scanned page to find itself a match
				 * in the unstable tree to create a
				 * brand new KSM page to add later to
				 * the dups of this stable_node.
				 */
				return NULL;
			}

			/*
			 * Lock and unlock the stable_node's page (which
			 * might already have been migrated) so that page
			 * migration is sure to notice its raised count.
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
			tree_page = get_ksm_page(stable_node_dup,
						 GET_KSM_PAGE_TRYLOCK);

			if (PTR_ERR(tree_page) == -EBUSY)
				return ERR_PTR(-EBUSY);

			if (unlikely(!tree_page))
				/*
				 * The tree may have been rebalanced,
				 * so re-evaluate parent and new.
				 */
				goto again;
			unlock_page(tree_page);

			if (get_kpfn_nid(stable_node_dup->kpfn) !=
			    NUMA(stable_node_dup->nid)) {
				put_page(tree_page);
				goto replace;
			}
			return tree_page;
		}
	}

	if (!page_node)
		return NULL;

	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	rb_link_node(&page_node->node, parent, new);
	rb_insert_color(&page_node->node, root);
out:
	if (is_page_sharing_candidate(page_node)) {
		get_page(page);
		return page;
	} else
		return NULL;

replace:
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node. A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree. Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
	 */
	if (stable_node_dup == stable_node) {
		VM_BUG_ON(is_stable_node_chain(stable_node_dup));
		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
		/* there is no chain */
		if (page_node) {
			VM_BUG_ON(page_node->head != &migrate_nodes);
			list_del(&page_node->list);
			DO_NUMA(page_node->nid = nid);
			rb_replace_node(&stable_node_dup->node,
					&page_node->node,
					root);
			if (is_page_sharing_candidate(page_node))
				get_page(page);
			else
				page = NULL;
		} else {
			rb_erase(&stable_node_dup->node, root);
			page = NULL;
		}
	} else {
		VM_BUG_ON(!is_stable_node_chain(stable_node));
		__stable_node_dup_del(stable_node_dup);
		if (page_node) {
			VM_BUG_ON(page_node->head != &migrate_nodes);
			list_del(&page_node->list);
			DO_NUMA(page_node->nid = nid);
			stable_node_chain_add_dup(page_node, stable_node);
			if (is_page_sharing_candidate(page_node))
				get_page(page);
			else
				page = NULL;
		} else {
			page = NULL;
		}
	}
	stable_node_dup->head = &migrate_nodes;
	list_add(&stable_node_dup->list, stable_node_dup->head);
	return page;

chain_append:
	/* stable_node_dup could be null if it reached the limit */
	if (!stable_node_dup)
		stable_node_dup = stable_node_any;
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node. A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree. Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
	 */
	if (stable_node_dup == stable_node) {
		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
		/* chain is missing so create it */
		stable_node = alloc_stable_node_chain(stable_node_dup,
						      root);
		if (!stable_node)
			return NULL;
	}
	/*
	 * Add this stable_node dup that was migrated to the
	 * stable_node chain of the current nid for this page content.
	 */
	VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
	VM_BUG_ON(page_node->head != &migrate_nodes);
	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	stable_node_chain_add_dup(page_node, stable_node);
	goto out;
}

/*
 * stable_tree_insert - insert stable tree node pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	int nid;
	unsigned long kpfn;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
	bool need_chain = false;

	kpfn = page_to_pfn(kpage);
	nid = get_kpfn_nid(kpfn);
	root = root_stable_tree + nid;
again:
	parent = NULL;
	new = &root->rb_node;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		stable_node_any = NULL;
		tree_page = chain(&stable_node_dup, stable_node, root);
		if (!stable_node_dup) {
			/*
			 * Either all stable_node dups were full in
			 * this stable_node chain, or this chain was
			 * empty and should be rb_erased.
			 */
			stable_node_any = stable_node_dup_any(stable_node,
							      root);
			if (!stable_node_any) {
				/* rb_erase just ran */
				goto again;
			}
			/*
			 * Take the page of any of the stable_node dups
			 * in this stable_node chain to let the tree walk
			 * continue. All KSM pages belonging to the
			 * stable_node dups in a stable_node chain
			 * have the same content and they're
			 * write protected at all times. Any will work
			 * fine to continue the walk.
			 */
			tree_page = get_ksm_page(stable_node_any,
						 GET_KSM_PAGE_NOLOCK);
		}
		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			need_chain = true;
			break;
		}
	}

	stable_node_dup = alloc_stable_node();
	if (!stable_node_dup)
		return NULL;

	INIT_HLIST_HEAD(&stable_node_dup->hlist);
	stable_node_dup->kpfn = kpfn;
	set_page_stable_node(kpage, stable_node_dup);
	stable_node_dup->rmap_hlist_len = 0;
	DO_NUMA(stable_node_dup->nid = nid);
	if (!need_chain) {
		rb_link_node(&stable_node_dup->node, parent, new);
		rb_insert_color(&stable_node_dup->node, root);
	} else {
		if (!is_stable_node_chain(stable_node)) {
			struct stable_node *orig = stable_node;
			/* chain is missing so create it */
			stable_node = alloc_stable_node_chain(orig, root);
			if (!stable_node) {
				free_stable_node(stable_node_dup);
				return NULL;
			}
		}
		stable_node_chain_add_dup(stable_node_dup, stable_node);
	}

	return stable_node_dup;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new;
	struct rb_root *root;
	struct rb_node *parent = NULL;
	int nid;

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_unstable_tree + nid;
	new = &root->rb_node;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else if (!ksm_merge_across_nodes &&
			   page_to_nid(tree_page) != nid) {
			/*
			 * If tree_page has been migrated to another NUMA node,
			 * it will be flushed out and put in the right unstable
			 * tree next time: only merge with it when across_nodes.
			 */
			put_page(tree_page);
			return NULL;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	DO_NUMA(rmap_item->nid = nid);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node,
			       bool max_page_sharing_bypass)
{
	/*
	 * rmap won't find this mapping if we don't insert the
	 * rmap_item in the right stable_node
	 * duplicate. page_migration could break later if rmap breaks,
	 * so we may as well crash here. We really only need to check
	 * for rmap_hlist_len == STABLE_NODE_CHAIN, but we may as well
	 * check for any other negative value: an underflow detected
	 * here for the first time (rather than when decreasing
	 * rmap_hlist_len) would be a sign of memory corruption in the
	 * stable_node.
	 */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	stable_node->rmap_hlist_len++;
	if (!max_page_sharing_bypass)
		/* possibly non fatal but unexpected overflow, only warn */
		WARN_ON_ONCE(stable_node->rmap_hlist_len >
			     ksm_max_page_sharing);

	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}

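/*
 * Accounting sketch for the two counters above (derived from the code,
 * not from the original comments): the first rmap_item appended to a
 * stable_node bumps ksm_pages_shared (one more KSM page now exists),
 * while every further rmap_item on the same node bumps ksm_pages_sharing
 * (one more pte saved by sharing). So content mapped by N ptes costs one
 * pages_shared plus N-1 pages_sharing.
 */
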
/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching an identical page for.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;
	bool max_page_sharing_bypass = false;

	stable_node = page_stable_node(page);
	if (stable_node) {
		if (stable_node->head != &migrate_nodes &&
		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
		    NUMA(stable_node->nid)) {
			stable_node_dup_del(stable_node);
			stable_node->head = &migrate_nodes;
			list_add(&stable_node->list, stable_node->head);
		}
		if (stable_node->head != &migrate_nodes &&
		    rmap_item->head == stable_node)
			return;
		/*
		 * If it's a KSM fork, allow it to go over the sharing limit
		 * without warnings.
		 */
		if (!is_page_sharing_candidate(stable_node))
			max_page_sharing_bypass = true;
	}

	/* We first start by searching for the page inside the stable tree */
	kpage = stable_tree_search(page);
	if (kpage == page && rmap_item->head == stable_node) {
		put_page(kpage);
		return;
	}

	remove_rmap_item_from_tree(rmap_item);

	if (kpage) {
		if (PTR_ERR(kpage) == -EBUSY)
			return;

		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, page_stable_node(kpage),
					   max_page_sharing_bypass);
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	/*
	 * Same checksum as an empty page. We attempt to merge it with the
	 * appropriate zero page if the user enabled this via sysfs.
	 */
	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = find_mergeable_vma(mm, rmap_item->address);
		if (vma) {
			err = try_to_merge_one_page(vma, page,
					ZERO_PAGE(rmap_item->address));
		} else {
			/*
			 * If the vma is out of date, we do not need to
			 * continue.
			 */
			err = 0;
		}
		mmap_read_unlock(mm);
		/*
		 * In case of failure, the page was not really empty, so we
		 * need to continue. Otherwise we're done.
		 */
		if (!err)
			return;
	}
	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		bool split;

		kpage = try_to_merge_two_pages(rmap_item, page,
					       tree_rmap_item, tree_page);
		/*
		 * If both pages we tried to merge belong to the same compound
		 * page, then we actually ended up increasing the reference
		 * count of the same compound page twice, and split_huge_page
		 * failed.
		 * Here we set a flag if that happened, and we use it later to
		 * try split_huge_page again. Since we call put_page right
		 * afterwards, the reference count will be correct and
		 * split_huge_page should succeed.
		 */
		split = PageTransCompound(page)
			&& compound_head(page) == compound_head(tree_page);
		put_page(tree_page);
		if (kpage) {
			/*
			 * The pages were successfully merged: insert new
			 * node in the stable tree and add both rmap_items.
			 */
			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node,
						   false);
				stable_tree_append(rmap_item, stable_node,
						   false);
			}
			unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		} else if (split) {
			/*
			 * We are here if we tried to merge two pages and
			 * failed because they both belonged to the same
			 * compound page. We will split the page now, but no
			 * merging will take place.
			 * We do not want to add the cost of a full lock; if
			 * the page is locked, it is better to skip it and
			 * perhaps try again later.
			 */
			if (!trylock_page(page))
				return;
			split_huge_page(page);
			unlock_page(page);
		}
	}
}

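/*
 * Flow sketch for cmp_and_merge_page() above (a summary, not part of the
 * original comments): a page must be seen with an unchanged checksum
 * (computed with xxhash via calc_checksum()) on two consecutive scans
 * before it is admitted to the unstable tree; only when a second
 * identical page is found there are both write-protected, merged into a
 * freshly allocated KSM page and promoted to the stable tree. Frequently
 * written pages therefore never get past the checksum gate.
 */
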
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct rmap_item **rmap_list,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;
	int nid;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		/*
		 * A number of pages can hang around indefinitely on per-cpu
		 * pagevecs, raised page count preventing write_protect_page
		 * from merging them. Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		/*
		 * Whereas stale stable_nodes on the stable_tree itself
		 * get pruned in the regular course of stable_tree_search(),
		 * those moved out to the migrate_nodes list can accumulate:
		 * so prune them once before each full scan.
		 */
		if (!ksm_merge_across_nodes) {
			struct stable_node *stable_node, *next;
			struct page *page;

			list_for_each_entry_safe(stable_node, next,
						 &migrate_nodes, list) {
				page = get_ksm_page(stable_node,
						    GET_KSM_PAGE_NOLOCK);
				if (page)
					put_page(page);
				cond_resched();
			}
		}

		for (nid = 0; nid < ksm_nr_node_ids; nid++)
			root_unstable_tree[nid] = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
		/*
		 * Although we tested list_empty() above, a racing __ksm_exit
		 * of the last mm on the list may have removed it since then.
		 */
		if (slot == &ksm_mm_head)
			return NULL;
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}

	mm = slot->mm;
	mmap_read_lock(mm);
	if (ksm_test_exit(mm))
		vma = NULL;
	else
		vma = find_vma(mm, ksm_scan.address);

	for (; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (IS_ERR_OR_NULL(*page)) {
				ksm_scan.address += PAGE_SIZE;
				cond_resched();
				continue;
			}
			if (PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;
					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				mmap_read_unlock(mm);
				return rmap_item;
			}
			put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
				      struct mm_slot, mm_list);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_lock
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_lock then protects against race with MADV_MERGEABLE).
		 */
		hash_del(&slot->link);
		list_del(&slot->mm_list);
		spin_unlock(&ksm_mmlist_lock);

		free_mm_slot(slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmap_read_unlock(mm);
		mmdrop(mm);
	} else {
		mmap_read_unlock(mm);
		/*
		 * mmap_read_unlock(mm) first because after
		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
		 * already have been freed under us by __ksm_exit()
		 * because the "mm_slot" is still hashed and
		 * ksm_scan.mm_slot doesn't point to it anymore.
		 */
		spin_unlock(&ksm_mmlist_lock);
	}

	/* Repeat until we've completed scanning the whole list */
	slot = ksm_scan.mm_slot;
	if (slot != &ksm_mm_head)
		goto next_mm;

	ksm_scan.seqnr++;
	return NULL;
}

/**
 * ksm_do_scan  - the ksm scanner main worker function.
 * @scan_npages:  number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *page;

	while (scan_npages-- && likely(!freezing(current))) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		cmp_and_merge_page(page, rmap_item);
		put_page(page);
	}
}

static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
	unsigned int sleep_ms;

	set_freezable();
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		wait_while_offlining();
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		try_to_freeze();

		if (ksmd_should_run()) {
			sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
			wait_event_interruptible_timeout(ksm_iter_wait,
				sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
				msecs_to_jiffies(sleep_ms));
		} else {
			wait_event_freezable(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}

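/*
 * Tuning sketch (the values below are illustrative only): the knobs read
 * by the scanner loop above are exported under /sys/kernel/mm/ksm/, e.g.
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1   > /sys/kernel/mm/ksm/run
 *
 * ksmd then scans pages_to_scan pages per wakeup, sleeping
 * sleep_millisecs between batches while there is work queued.
 */
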
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
				 VM_HUGETLB | VM_MIXEDMAP))
			return 0;		/* just ignore the advice */

		if (vma_is_dax(vma))
			return 0;

#ifdef VM_SAO
		if (*vm_flags & VM_SAO)
			return 0;
#endif
#ifdef VM_SPARC_ADI
		if (*vm_flags & VM_SPARC_ADI)
			return 0;
#endif

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ksm_madvise);

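/*
 * Userspace sketch of how ksm_madvise() is reached (illustrative, not
 * part of this file): an application opts an anonymous mapping into KSM
 * scanning with madvise(2) and MADV_MERGEABLE:
 *
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise(MADV_MERGEABLE)");
 *
 * MADV_UNMERGEABLE undoes this, unmerging any KSM pages in the range.
 */
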
2476 | |
2477 | int __ksm_enter(struct mm_struct *mm) | |
2478 | { | |
6e158384 HD |
2479 | struct mm_slot *mm_slot; |
2480 | int needs_wakeup; | |
2481 | ||
2482 | mm_slot = alloc_mm_slot(); | |
31dbd01f IE |
2483 | if (!mm_slot) |
2484 | return -ENOMEM; | |
2485 | ||
6e158384 HD |
2486 | /* Check ksm_run too? Would need tighter locking */ |
2487 | needs_wakeup = list_empty(&ksm_mm_head.mm_list); | |
2488 | ||
31dbd01f IE |
2489 | spin_lock(&ksm_mmlist_lock); |
2490 | insert_to_mm_slots_hash(mm, mm_slot); | |
2491 | /* | |
cbf86cfe HD |
2492 | * When KSM_RUN_MERGE (or KSM_RUN_STOP), |
2493 | * insert just behind the scanning cursor, to let the area settle | |
31dbd01f IE |
2494 | * down a little; when fork is followed by immediate exec, we don't |
2495 | * want ksmd to waste time setting up and tearing down an rmap_list. | |
cbf86cfe HD |
2496 | * |
2497 | * But when KSM_RUN_UNMERGE, it's important to insert ahead of its | |
2498 | * scanning cursor, otherwise KSM pages in newly forked mms will be | |
2499 | * missed: then we might as well insert at the end of the list. | |
31dbd01f | 2500 | */ |
cbf86cfe HD |
2501 | if (ksm_run & KSM_RUN_UNMERGE) |
2502 | list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); | |
2503 | else | |
2504 | list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); | |
31dbd01f IE |
2505 | spin_unlock(&ksm_mmlist_lock); |
2506 | ||
f8af4da3 | 2507 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
f1f10076 | 2508 | mmgrab(mm); |
6e158384 HD |
2509 | |
2510 | if (needs_wakeup) | |
2511 | wake_up_interruptible(&ksm_thread_wait); | |
2512 | ||
f8af4da3 HD |
2513 | return 0; |
2514 | } | |
2515 | ||
void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_lock to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct folio *folio = page_folio(page);
	struct anon_vma *anon_vma = folio_anon_vma(folio);
	struct page *new_page;

	if (PageKsm(page)) {
		if (page_stable_node(page) &&
		    !(ksm_run & KSM_RUN_UNMERGE))
			return page;	/* no need to copy it */
	} else if (!anon_vma) {
		return page;		/* no need to copy it */
	} else if (page->index == linear_page_index(vma, address) &&
		   anon_vma->root == vma->anon_vma->root) {
		return page;		/* still no need to copy it */
	}
	if (!PageUptodate(page))
		return page;		/* let do_swap_page report the error */

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page &&
	    mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
		put_page(new_page);
		new_page = NULL;
	}
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__SetPageLocked(new_page);
#ifdef CONFIG_SWAP
		count_vm_event(KSM_SWPIN_COPY);
#endif
	}

	return new_page;
}

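/*
 * Caller sketch (illustrative): swapin paths such as do_swap_page()
 * pass the page they found through the helper above and, if a copy
 * comes back, fault in the copy instead, since wiring a shared KSM page
 * into an unrelated anon_vma would be wrong. Roughly:
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out_page;
 *	}
 */
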
void rmap_walk_ksm(struct folio *folio, const struct rmap_walk_control *rwc)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	int search_new_forks = 0;

	VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);

	/*
	 * Rely on the page lock to protect against concurrent modifications
	 * to that page's node of the stable tree.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	stable_node = folio_stable_node(folio);
	if (!stable_node)
		return;
again:
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		cond_resched();
		anon_vma_lock_read(anon_vma);
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			unsigned long addr;

			cond_resched();
			vma = vmac->vma;

			/* Ignore the stable/unstable/sqnr flags */
			addr = rmap_item->address & PAGE_MASK;

			if (addr < vma->vm_start || addr >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
				anon_vma_unlock_read(anon_vma);
				return;
			}
			if (rwc->done && rwc->done(folio)) {
				anon_vma_unlock_read(anon_vma);
				return;
			}
		}
		anon_vma_unlock_read(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
}

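/*
 * Usage sketch (illustrative; the callback and cookie names below are
 * hypothetical): generic rmap walkers hand a KSM folio to the function
 * above through a struct rmap_walk_control, e.g.
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = some_pte_handler,	// hypothetical callback
 *		.arg = private_state,		// hypothetical cookie
 *	};
 *	rmap_walk_ksm(folio, &rwc);
 *
 * rmap_one is then called for every virtual address mapping the folio,
 * across every mm that merged into this stable node.
 */
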
#ifdef CONFIG_MIGRATION
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
	struct stable_node *stable_node;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
	VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);

	stable_node = folio_stable_node(folio);
	if (stable_node) {
		VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
		stable_node->kpfn = folio_pfn(newfolio);
		/*
		 * newfolio->mapping was set in advance; now we need smp_wmb()
		 * to make sure that the new stable_node->kpfn is visible
		 * to get_ksm_page() before it can see that folio->mapping
		 * has gone stale (or that folio_test_swapcache has been cleared).
		 */
		smp_wmb();
		set_page_stable_node(&folio->page, NULL);
	}
}
#endif /* CONFIG_MIGRATION */

62b61f61 | 2683 | #ifdef CONFIG_MEMORY_HOTREMOVE |
ef4d43a8 HD |
2684 | static void wait_while_offlining(void) |
2685 | { | |
2686 | while (ksm_run & KSM_RUN_OFFLINE) { | |
2687 | mutex_unlock(&ksm_thread_mutex); | |
2688 | wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), | |
74316201 | 2689 | TASK_UNINTERRUPTIBLE); |
ef4d43a8 HD |
2690 | mutex_lock(&ksm_thread_mutex); |
2691 | } | |
2692 | } | |
2693 | ||
2c653d0e AA |
2694 | static bool stable_node_dup_remove_range(struct stable_node *stable_node, |
2695 | unsigned long start_pfn, | |
2696 | unsigned long end_pfn) | |
2697 | { | |
2698 | if (stable_node->kpfn >= start_pfn && | |
2699 | stable_node->kpfn < end_pfn) { | |
2700 | /* | |
2701 | * Don't get_ksm_page, page has already gone: | |
2702 | * which is why we keep kpfn instead of page* | |
2703 | */ | |
2704 | remove_node_from_stable_tree(stable_node); | |
2705 | return true; | |
2706 | } | |
2707 | return false; | |
2708 | } | |
2709 | ||
static bool stable_node_chain_remove_range(struct stable_node *stable_node,
					   unsigned long start_pfn,
					   unsigned long end_pfn,
					   struct rb_root *root)
{
	struct stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		return stable_node_dup_remove_range(stable_node, start_pfn,
						    end_pfn);
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		stable_node_dup_remove_range(dup, start_pfn, end_pfn);
	}
	if (hlist_empty(&stable_node->hlist)) {
		free_stable_node_chain(stable_node, root);
		return true; /* notify caller that tree was rebalanced */
	} else
		return false;
}

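/*
 * On MEM_OFFLINE, prune from every per-node stable tree, and from the
 * migrate_nodes list, any stable node whose kpfn falls within
 * [start_pfn, end_pfn): those struct pages are about to disappear.
 */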
static void ksm_check_stable_tree(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	struct stable_node *stable_node, *next;
	struct rb_node *node;
	int nid;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		node = rb_first(root_stable_tree + nid);
		while (node) {
			stable_node = rb_entry(node, struct stable_node, node);
			if (stable_node_chain_remove_range(stable_node,
							   start_pfn, end_pfn,
							   root_stable_tree +
							   nid))
				node = rb_first(root_stable_tree + nid);
			else
				node = rb_next(node);
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			remove_node_from_stable_tree(stable_node);
		cond_resched();
	}
}

static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
		 * and remove_all_stable_nodes() while memory is going offline:
		 * it is unsafe for them to touch the stable tree at this time.
		 * But unmerge_ksm_pages(), rmap lookups and other entry points
		 * which do not need the ksm_thread_mutex are all safe.
		 */
		mutex_lock(&ksm_thread_mutex);
		ksm_run |= KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree,
		 * otherwise get_ksm_page() might later try to access a
		 * non-existent struct page.
		 */
		ksm_check_stable_tree(mn->start_pfn,
				      mn->start_pfn + mn->nr_pages);
		fallthrough;
	case MEM_CANCEL_OFFLINE:
		mutex_lock(&ksm_thread_mutex);
		ksm_run &= ~KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);

		smp_mb();	/* wake_up_bit advises this */
		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
		break;
	}
	return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

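/*
 * The attributes below appear under /sys/kernel/mm/ksm/.  A typical
 * (purely illustrative) tuning session from userspace:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20 > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1 > /sys/kernel/mm/ksm/run
 *	cat /sys/kernel/mm/ksm/pages_sharing
 */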
#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;
	wake_up_interruptible(&ksm_iter_wait);

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int nr_pages;
	int err;

	err = kstrtouint(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int flags;
	int err;

	err = kstrtouint(buf, 10, &flags);
	if (err)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			set_current_oom_origin();
			err = unmerge_and_remove_all_rmap_items();
			clear_current_oom_origin();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);

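/*
 * merge_across_nodes can only be changed while there are no shared pages:
 * e.g. (illustratively) "echo 2 > run" to unmerge everything first, then
 * "echo 0 > merge_across_nodes" to restrict merging to NUMA-local pages.
 */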
#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long knob;

	err = kstrtoul(buf, 10, &knob);
	if (err)
		return err;
	if (knob > 1)
		return -EINVAL;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_merge_across_nodes != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else if (root_stable_tree == one_stable_tree) {
			struct rb_root *buf;
			/*
			 * This is the first time that we switch away from the
			 * default of merging across nodes: must now allocate
			 * a buffer to hold as many roots as may be needed.
			 * Allocate stable and unstable together:
			 * MAXSMP NODES_SHIFT 10 will use 16kB.
			 */
			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
				      GFP_KERNEL);
			/* kcalloc() zeroed the buffer: RB_ROOT is { NULL }, i.e. all zeroes */
			if (!buf)
				err = -ENOMEM;
			else {
				root_stable_tree = buf;
				root_unstable_tree = buf + nr_node_ids;
				/* Stable tree is empty but not the unstable */
				root_unstable_tree[0] = one_unstable_tree[0];
			}
		}
		if (!err) {
			ksm_merge_across_nodes = knob;
			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif

static ssize_t use_zero_pages_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
}
static ssize_t use_zero_pages_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_use_zero_pages = value;

	return count;
}
KSM_ATTR(use_zero_pages);

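/*
 * max_page_sharing caps how many rmap_items may share a single stable
 * node; once the cap is hit, KSM starts another "dup" page with the same
 * contents and links it into a chain, which the stable_node_chains and
 * stable_node_dups counters further below make visible.
 */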
static ssize_t max_page_sharing_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
}

static ssize_t max_page_sharing_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	int knob;

	err = kstrtoint(buf, 10, &knob);
	if (err)
		return err;
	/*
	 * When a KSM page is created it is shared by 2 mappings. This
	 * being a signed comparison, it implicitly verifies it's not
	 * negative.
	 */
	if (knob < 2)
		return -EINVAL;

	if (READ_ONCE(ksm_max_page_sharing) == knob)
		return count;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_max_page_sharing != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else
			ksm_max_page_sharing = knob;
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(max_page_sharing);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t stable_node_dups_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
}
KSM_ATTR_RO(stable_node_dups);

static ssize_t stable_node_chains_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
}
KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_stable_node_chains_prune_millisecs = msecs;

	return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

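/*
 * ksm_init() runs at subsys_initcall time: create the slab caches, start
 * the ksmd scanner thread, then register the sysfs group; without
 * CONFIG_SYSFS, merging is simply switched on, since there is no knob.
 */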
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);