// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons, so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
                                   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
        .set = zswap_enabled_param_set,
        .get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
                                      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
        .set = zswap_compressor_param_set,
        .get = param_get_charp,
        .free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
                &zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
        .set = zswap_zpool_param_set,
        .get = param_get_charp,
        .free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
                   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
                   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
                   bool, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
                CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool is_zswap_enabled(void)
{
        return zswap_enabled;
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        struct crypto_wait wait;
        u8 *buffer;
        struct mutex mutex;
        bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
        struct zpool *zpools[ZSWAP_NR_ZPOOLS];
        struct crypto_acomp_ctx __percpu *acomp_ctx;
        struct percpu_ref ref;
        struct list_head list;
        struct work_struct release_work;
        struct hlist_node node;
        char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;
/* counter of pages stored in all zswap pools. */
static atomic_t zswap_nr_stored = ATOMIC_INIT(0);

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page, length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
        struct rb_node rbnode;
        swp_entry_t swpentry;
        unsigned int length;
        struct zswap_pool *pool;
        union {
                unsigned long handle;
                unsigned long value;
        };
        struct obj_cgroup *objcg;
        struct list_head lru;
};

struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
        ZSWAP_UNINIT,
        ZSWAP_INIT_SUCCEED,
        ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
{
        return &zswap_trees[swp_type(swp)][swp_offset(swp)
                >> SWAP_ADDRESS_SPACE_SHIFT];
}

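/*
 * Illustrative note (not from the original source): each swap device gets
 * an array of trees, one per SWAP_ADDRESS_SPACE_SHIFT-sized range of swap
 * slots. Assuming SWAP_ADDRESS_SPACE_SHIFT == 14, offsets 0..16383 of a
 * device map to tree 0, offsets 16384..32767 to tree 1, and so on, which
 * spreads tree-lock contention across slot ranges of the same device.
 */
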
#define zswap_pool_debug(msg, p)                                \
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
                 zpool_get_type((p)->zpools[0]))

static bool zswap_is_full(void)
{
        return totalram_pages() * zswap_max_pool_percent / 100 <
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
        return totalram_pages() * zswap_accept_thr_percent / 100 *
                                zswap_max_pool_percent / 100 >
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

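/*
 * Worked example (illustrative numbers, not from the original source):
 * with 16 GiB of RAM (4194304 4K pages) and the defaults above,
 * zswap_is_full() triggers once the compressed pool exceeds 20% of RAM,
 * i.e. ~3.2 GiB. zswap_can_accept() then only turns true again once the
 * pool drops below 90% of that limit (18% of RAM, ~2.9 GiB), providing
 * hysteresis so stores don't flap right at the limit.
 */
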
static u64 get_zswap_pool_size(struct zswap_pool *pool)
{
        u64 pool_size = 0;
        int i;

        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
                pool_size += zpool_get_total_size(pool->zpools[i]);

        return pool_size;
}

static void zswap_update_total_size(void)
{
        struct zswap_pool *pool;
        u64 total = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                total += get_zswap_pool_size(pool);

        rcu_read_unlock();

        zswap_pool_total_size = total;
}

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
        int i;
        struct zswap_pool *pool;
        char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        int ret;

        if (!zswap_has_pool) {
                /* if either are unset, pool initialization failed, and we
                 * need both params to be set correctly before trying to
                 * create a pool.
                 */
                if (!strcmp(type, ZSWAP_PARAM_UNSET))
                        return NULL;
                if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
                        return NULL;
        }

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
                /* unique name for each pool specifically required by zsmalloc */
                snprintf(name, 38, "zswap%x",
                         atomic_inc_return(&zswap_pools_count));

                pool->zpools[i] = zpool_create_pool(type, name, gfp);
                if (!pool->zpools[i]) {
                        pr_err("%s zpool not available\n", type);
                        goto error;
                }
        }
        pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

        strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

        pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
        if (!pool->acomp_ctx) {
                pr_err("percpu alloc failed\n");
                goto error;
        }

        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
                                       &pool->node);
        if (ret)
                goto error;

        /* being the current pool takes 1 ref; this func expects the
         * caller to always add the new pool as the current pool
         */
        ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
                              PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
        if (ret)
                goto ref_fail;
        INIT_LIST_HEAD(&pool->list);

        zswap_pool_debug("created", pool);

        return pool;

ref_fail:
        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
        if (pool->acomp_ctx)
                free_percpu(pool->acomp_ctx);
        while (i--)
                zpool_destroy_pool(pool->zpools[i]);
        kfree(pool);
        return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
        bool has_comp, has_zpool;

        has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        if (!has_comp && strcmp(zswap_compressor,
                                CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
                pr_err("compressor %s not available, using default %s\n",
                       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
                param_free_charp(&zswap_compressor);
                zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
                has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        }
        if (!has_comp) {
                pr_err("default compressor %s not available\n",
                       zswap_compressor);
                param_free_charp(&zswap_compressor);
                zswap_compressor = ZSWAP_PARAM_UNSET;
        }

        has_zpool = zpool_has_pool(zswap_zpool_type);
        if (!has_zpool && strcmp(zswap_zpool_type,
                                 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
                pr_err("zpool %s not available, using default %s\n",
                       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
                has_zpool = zpool_has_pool(zswap_zpool_type);
        }
        if (!has_zpool) {
                pr_err("default zpool %s not available\n",
                       zswap_zpool_type);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = ZSWAP_PARAM_UNSET;
        }

        if (!has_comp || !has_zpool)
                return NULL;

        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
        int i;

        zswap_pool_debug("destroying", pool);

        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
        free_percpu(pool->acomp_ctx);

        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
                zpool_destroy_pool(pool->zpools[i]);
        kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
        struct zswap_pool *pool = container_of(work, typeof(*pool),
                                               release_work);

        synchronize_rcu();

        /* nobody should have been able to get a ref... */
        WARN_ON(!percpu_ref_is_zero(&pool->ref));
        percpu_ref_exit(&pool->ref);

        /* pool is now off zswap_pools list and has no references. */
        zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
        struct zswap_pool *pool;

        pool = container_of(ref, typeof(*pool), ref);

        spin_lock_bh(&zswap_pools_lock);

        WARN_ON(pool == zswap_pool_current());

        list_del_rcu(&pool->list);

        INIT_WORK(&pool->release_work, __zswap_pool_release);
        schedule_work(&pool->release_work);

        spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
        if (!pool)
                return 0;

        return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
        percpu_ref_put(&pool->ref);
}

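/*
 * Pool reference lifecycle (summary inferred from the code above and the
 * param callbacks below): a pool holds one ref for being the current pool.
 * When a param change retires it, that ref is dropped via percpu_ref_kill();
 * once all transient refs from zswap_pool_get() are gone,
 * __zswap_pool_empty() unlinks it and schedules __zswap_pool_release() to
 * destroy it after an RCU grace period. A retired pool that is re-selected
 * before dying is revived with percpu_ref_resurrect().
 */
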
static struct zswap_pool *__zswap_pool_current(void)
{
        struct zswap_pool *pool;

        pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
        WARN_ONCE(!pool && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);

        return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
        assert_spin_locked(&zswap_pools_lock);

        return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
        struct zswap_pool *pool;

        rcu_read_lock();

        pool = __zswap_pool_current();
        if (!zswap_pool_get(pool))
                pool = NULL;

        rcu_read_unlock();

        return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
        struct zswap_pool *pool;

        assert_spin_locked(&zswap_pools_lock);

        list_for_each_entry_rcu(pool, &zswap_pools, list) {
                if (strcmp(pool->tfm_name, compressor))
                        continue;
                /* all zpools share the same type */
                if (strcmp(zpool_get_type(pool->zpools[0]), type))
                        continue;
                /* if we can't get it, it's about to be destroyed */
                if (!zswap_pool_get(pool))
                        continue;
                return pool;
        }

        return NULL;
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
        /* no change required */
        if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
                return false;
        return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                             char *type, char *compressor)
{
        struct zswap_pool *pool, *put_pool = NULL;
        char *s = strstrip((char *)val);
        int ret = 0;
        bool new_pool = false;

        mutex_lock(&zswap_init_lock);
        switch (zswap_init_state) {
        case ZSWAP_UNINIT:
                /* if this is load-time (pre-init) param setting,
                 * don't create a pool; that's done during init.
                 */
                ret = param_set_charp(s, kp);
                break;
        case ZSWAP_INIT_SUCCEED:
                new_pool = zswap_pool_changed(s, kp);
                break;
        case ZSWAP_INIT_FAILED:
                pr_err("can't set param, initialization failed\n");
                ret = -ENODEV;
        }
        mutex_unlock(&zswap_init_lock);

        /* no need to create a new pool, return directly */
        if (!new_pool)
                return ret;

        if (!type) {
                if (!zpool_has_pool(s)) {
                        pr_err("zpool %s not available\n", s);
                        return -ENOENT;
                }
                type = s;
        } else if (!compressor) {
                if (!crypto_has_acomp(s, 0, 0)) {
                        pr_err("compressor %s not available\n", s);
                        return -ENOENT;
                }
                compressor = s;
        } else {
                WARN_ON(1);
                return -EINVAL;
        }

        spin_lock_bh(&zswap_pools_lock);

        pool = zswap_pool_find_get(type, compressor);
        if (pool) {
                zswap_pool_debug("using existing", pool);
                WARN_ON(pool == zswap_pool_current());
                list_del_rcu(&pool->list);
        }

        spin_unlock_bh(&zswap_pools_lock);

        if (!pool)
                pool = zswap_pool_create(type, compressor);
        else {
                /*
                 * Restore the initial ref dropped by percpu_ref_kill()
                 * when the pool was decommissioned and switch it again
                 * to percpu mode.
                 */
                percpu_ref_resurrect(&pool->ref);

                /* Drop the ref from zswap_pool_find_get(). */
                zswap_pool_put(pool);
        }

        if (pool)
                ret = param_set_charp(s, kp);
        else
                ret = -EINVAL;

        spin_lock_bh(&zswap_pools_lock);

        if (!ret) {
                put_pool = zswap_pool_current();
                list_add_rcu(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else if (pool) {
                /* add the possibly pre-existing pool to the end of the pools
                 * list; if it's new (and empty) then it'll be removed and
                 * destroyed by the put after we drop the lock
                 */
                list_add_tail_rcu(&pool->list, &zswap_pools);
                put_pool = pool;
        }

        spin_unlock_bh(&zswap_pools_lock);

        if (!zswap_has_pool && !pool) {
                /* if initial pool creation failed, and this pool creation also
                 * failed, maybe both compressor and zpool params were bad.
                 * Allow changing this param, so pool creation will succeed
                 * when the other param is changed. We already verified this
                 * param is ok in the zpool_has_pool() or crypto_has_acomp()
                 * checks above.
                 */
                ret = param_set_charp(s, kp);
        }

        /* drop the ref from either the old current pool,
         * or the new pool we failed to add
         */
        if (put_pool)
                percpu_ref_kill(&put_pool->ref);

        return ret;
}

static int zswap_compressor_param_set(const char *val,
                                      const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
                                 const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
                                   const struct kernel_param *kp)
{
        int ret = -ENODEV;

        /* if this is load-time (pre-init) param setting, only set param. */
        if (system_state != SYSTEM_RUNNING)
                return param_set_bool(val, kp);

        mutex_lock(&zswap_init_lock);
        switch (zswap_init_state) {
        case ZSWAP_UNINIT:
                if (zswap_setup())
                        break;
                fallthrough;
        case ZSWAP_INIT_SUCCEED:
                if (!zswap_has_pool)
                        pr_err("can't enable, no pool configured\n");
                else
                        ret = param_set_bool(val, kp);
                break;
        case ZSWAP_INIT_FAILED:
                pr_err("can't enable, initialization failed\n");
        }
        mutex_unlock(&zswap_init_lock);

        return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
        return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
        return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
        return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
        atomic_long_t *nr_zswap_protected;
        unsigned long lru_size, old, new;
        int nid = entry_to_nid(entry);
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        /*
         * Note that it is safe to use rcu_read_lock() here, even in the face of
         * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
         * used in list_lru lookup, only two scenarios are possible:
         *
         * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
         *    new entry will be reparented to memcg's parent's list_lru.
         * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
         *    new entry will be added directly to memcg's parent's list_lru.
         *
         * Similar reasoning holds for list_lru_del().
         */
        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* will always succeed */
        list_lru_add(list_lru, &entry->lru, nid, memcg);

        /* Update the protection area */
        lru_size = list_lru_count_one(list_lru, nid, memcg);
        lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
        nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
        old = atomic_long_inc_return(nr_zswap_protected);
        /*
         * Decay to avoid overflow and adapt to changing workloads.
         * This is based on LRU reclaim cost decaying heuristics.
         */
        do {
                new = old > lru_size / 4 ? old / 2 : old;
        } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
        rcu_read_unlock();
}

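/*
 * Illustrative numbers for the decay heuristic above: if the per-memcg LRU
 * holds lru_size == 1000 entries, nr_zswap_protected grows by one per store
 * until it passes lru_size / 4 == 250, at which point each further
 * increment halves it (e.g. 251 -> 125). The protected count therefore
 * tracks recent swapout activity without growing without bound.
 */
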
static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
        int nid = entry_to_nid(entry);
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* will always succeed */
        list_lru_del(list_lru, &entry->lru, nid, memcg);
        rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
        atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
        struct lruvec *lruvec;

        if (folio) {
                lruvec = folio_lruvec(folio);
                atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
        }
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
        /* lock out zswap shrinker walking memcg tree */
        spin_lock(&zswap_shrink_lock);
        if (zswap_next_shrink == memcg)
                zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
        spin_unlock(&zswap_shrink_lock);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
        struct rb_node *node = root->rb_node;
        struct zswap_entry *entry;
        pgoff_t entry_offset;

        while (node) {
                entry = rb_entry(node, struct zswap_entry, rbnode);
                entry_offset = swp_offset(entry->swpentry);
                if (entry_offset > offset)
                        node = node->rb_left;
                else if (entry_offset < offset)
                        node = node->rb_right;
                else
                        return entry;
        }
        return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
                           struct zswap_entry **dupentry)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        struct zswap_entry *myentry;
        pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

        while (*link) {
                parent = *link;
                myentry = rb_entry(parent, struct zswap_entry, rbnode);
                myentry_offset = swp_offset(myentry->swpentry);
                if (myentry_offset > entry_offset)
                        link = &(*link)->rb_left;
                else if (myentry_offset < entry_offset)
                        link = &(*link)->rb_right;
                else {
                        *dupentry = myentry;
                        return -EEXIST;
                }
        }
        rb_link_node(&entry->rbnode, parent, link);
        rb_insert_color(&entry->rbnode, root);
        return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
        rb_erase(&entry->rbnode, root);
        RB_CLEAR_NODE(&entry->rbnode);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
        struct zswap_entry *entry;

        entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
        if (!entry)
                return NULL;
        RB_CLEAR_NODE(&entry->rbnode);
        return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
        kmem_cache_free(zswap_entry_cache, entry);
}

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
        int i = 0;

        if (ZSWAP_NR_ZPOOLS > 1)
                i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

        return entry->pool->zpools[i];
}

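/*
 * Illustrative note: entries are spread across the pool's ZSWAP_NR_ZPOOLS
 * zpools by hashing the entry pointer; hash_ptr(entry, ilog2(32)) yields an
 * index in [0, 31]. The mapping is stable for an entry's lifetime, so the
 * same zpool is found again at load/free time without storing the index.
 */
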
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
        if (!entry->length)
                atomic_dec(&zswap_same_filled_pages);
        else {
                zswap_lru_del(&zswap_list_lru, entry);
                zpool_free(zswap_find_zpool(entry), entry->handle);
                atomic_dec(&zswap_nr_stored);
                zswap_pool_put(entry->pool);
        }
        if (entry->objcg) {
                obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
                obj_cgroup_put(entry->objcg);
        }
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
}

/*
 * The caller holds the tree lock and has looked the entry up in the tree,
 * so it must be on the tree; remove it from the tree and free it.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
                                   struct zswap_entry *entry)
{
        zswap_rb_erase(&tree->rbroot, entry);
        zswap_entry_free(entry);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        int ret;

        mutex_init(&acomp_ctx->mutex);

        acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
        if (!acomp_ctx->buffer)
                return -ENOMEM;

        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
        if (IS_ERR(acomp)) {
                pr_err("could not alloc crypto acomp %s : %ld\n",
                       pool->tfm_name, PTR_ERR(acomp));
                ret = PTR_ERR(acomp);
                goto acomp_fail;
        }
        acomp_ctx->acomp = acomp;
        acomp_ctx->is_sleepable = acomp_is_async(acomp);

        req = acomp_request_alloc(acomp_ctx->acomp);
        if (!req) {
                pr_err("could not alloc crypto acomp_request %s\n",
                       pool->tfm_name);
                ret = -ENOMEM;
                goto req_fail;
        }
        acomp_ctx->req = req;

        crypto_init_wait(&acomp_ctx->wait);
        /*
         * If the backend of acomp is an async zip, crypto_req_done() will
         * wake up crypto_wait_req(); if the backend of acomp is a scomp,
         * the callback won't be called, and crypto_wait_req() will return
         * without blocking.
         */
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &acomp_ctx->wait);

        return 0;

req_fail:
        crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
        kfree(acomp_ctx->buffer);
        return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

        if (!IS_ERR_OR_NULL(acomp_ctx)) {
                if (!IS_ERR_OR_NULL(acomp_ctx->req))
                        acomp_request_free(acomp_ctx->req);
                if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
                        crypto_free_acomp(acomp_ctx->acomp);
                kfree(acomp_ctx->buffer);
        }

        return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
        struct crypto_acomp_ctx *acomp_ctx;
        struct scatterlist input, output;
        int comp_ret = 0, alloc_ret = 0;
        unsigned int dlen = PAGE_SIZE;
        unsigned long handle;
        struct zpool *zpool;
        char *buf;
        gfp_t gfp;
        u8 *dst;

        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

        mutex_lock(&acomp_ctx->mutex);

        dst = acomp_ctx->buffer;
        sg_init_table(&input, 1);
        sg_set_page(&input, &folio->page, PAGE_SIZE, 0);

        /*
         * We need PAGE_SIZE * 2 here because compression may expand the
         * data, and hardware accelerators may not check the dst buffer
         * size; give the dst buffer enough length to avoid an overflow.
         */
        sg_init_one(&output, dst, PAGE_SIZE * 2);
        acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

        /*
         * It may look a little silly that we send an asynchronous request
         * and then wait for its completion synchronously, which makes the
         * process effectively synchronous.
         * Theoretically, acomp lets users submit multiple requests to one
         * acomp instance and have them complete simultaneously. But zswap
         * stores and loads page by page; there is no existing method for
         * one thread doing zswap to send the second page before the first
         * page is done. Different threads running on different cpus have
         * different acomp instances, however, so multiple threads can do
         * (de)compression in parallel.
         */
        comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
        dlen = acomp_ctx->req->dlen;
        if (comp_ret)
                goto unlock;

        zpool = zswap_find_zpool(entry);
        gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        if (zpool_malloc_support_movable(zpool))
                gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
        alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
        if (alloc_ret)
                goto unlock;

        buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
        memcpy(buf, dst, dlen);
        zpool_unmap_handle(zpool, handle);

        entry->handle = handle;
        entry->length = dlen;

unlock:
        if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
                zswap_reject_compress_poor++;
        else if (comp_ret)
                zswap_reject_compress_fail++;
        else if (alloc_ret)
                zswap_reject_alloc_fail++;

        mutex_unlock(&acomp_ctx->mutex);
        return comp_ret == 0 && alloc_ret == 0;
}

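/*
 * Error accounting note (summary of the unlock path above): -ENOSPC from
 * either the compressor or zpool_malloc() counts as "compress poor" (the
 * page didn't shrink enough to be worth storing), any other compressor
 * error as "compress fail", and any other allocation error as "alloc
 * fail". Callers only see a boolean; the counters preserve the reason.
 */
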
static void zswap_decompress(struct zswap_entry *entry, struct page *page)
{
        struct zpool *zpool = zswap_find_zpool(entry);
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
        u8 *src;

        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
        mutex_lock(&acomp_ctx->mutex);

        src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
        /*
         * If zpool_map_handle() is atomic, we cannot reliably utilize its
         * mapped buffer to do crypto_acomp_decompress(), which might sleep.
         * In such cases, we must resort to copying the buffer to a
         * temporary one.
         * Meanwhile, zpool_map_handle() might return a non-linearly mapped
         * buffer, such as a kmap address of high memory or even a vmap
         * address. However, sg_init_one() is only equipped to handle
         * linearly mapped low memory. In such cases, we also must copy
         * the buffer to a temporary, lowmem one.
         */
        if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
            !virt_addr_valid(src)) {
                memcpy(acomp_ctx->buffer, src, entry->length);
                src = acomp_ctx->buffer;
                zpool_unmap_handle(zpool, entry->handle);
        }

        sg_init_one(&input, src, entry->length);
        sg_init_table(&output, 1);
        sg_set_page(&output, page, PAGE_SIZE, 0);
        acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
        BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
        BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
        mutex_unlock(&acomp_ctx->mutex);

        if (src != acomp_ctx->buffer)
                zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
                                 swp_entry_t swpentry)
{
        struct zswap_tree *tree;
        struct folio *folio;
        struct mempolicy *mpol;
        bool folio_was_allocated;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
        };

        /* try to allocate swap cache folio */
        mpol = get_task_policy(current);
        folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
                        NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
        if (!folio)
                return -ENOMEM;

        /*
         * Found an existing folio, so we raced with swapin or a concurrent
         * shrinker. We generally writeback cold folios from zswap, and
         * swapin means the folio just became hot, so skip this folio.
         * For the unlikely concurrent shrinker case, the folio will be
         * unlinked and freed when invalidated by the concurrent shrinker
         * anyway.
         */
        if (!folio_was_allocated) {
                folio_put(folio);
                return -EEXIST;
        }

        /*
         * folio is locked, and the swapcache is now secured against
         * concurrent swapping to and from the slot, and concurrent
         * swapoff so we can safely dereference the zswap tree here.
         * Verify that the swap entry hasn't been invalidated and recycled
         * behind our backs, to avoid overwriting a new swap folio with
         * old compressed data. Only when this is successful can the entry
         * be dereferenced.
         */
        tree = swap_zswap_tree(swpentry);
        spin_lock(&tree->lock);
        if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
                spin_unlock(&tree->lock);
                delete_from_swap_cache(folio);
                folio_unlock(folio);
                folio_put(folio);
                return -ENOMEM;
        }

        /* Safe to deref entry after the entry is verified above. */
        zswap_rb_erase(&tree->rbroot, entry);
        spin_unlock(&tree->lock);

        zswap_decompress(entry, &folio->page);

        count_vm_event(ZSWPWB);
        if (entry->objcg)
                count_objcg_event(entry->objcg, ZSWPWB);

        zswap_entry_free(entry);

        /* folio is up to date */
        folio_mark_uptodate(folio);

        /* move it to the tail of the inactive list after end_writeback */
        folio_set_reclaim(folio);

        /* start writeback */
        __swap_writepage(folio, &wbc);
        folio_put(folio);

        return 0;
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
                                       spinlock_t *lock, void *arg)
{
        struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
        bool *encountered_page_in_swapcache = (bool *)arg;
        swp_entry_t swpentry;
        enum lru_status ret = LRU_REMOVED_RETRY;
        int writeback_result;

        /*
         * As soon as we drop the LRU lock, the entry can be freed by
         * a concurrent invalidation. This means the following:
         *
         * 1. We extract the swp_entry_t to the stack, allowing
         *    zswap_writeback_entry() to pin the swap entry and
         *    then validate the zswap entry against that swap entry's
         *    tree using pointer value comparison. Only when that
         *    is successful can the entry be dereferenced.
         *
         * 2. Usually, objects are taken off the LRU for reclaim. In
         *    this case this isn't possible, because if reclaim fails
         *    for whatever reason, we have no means of knowing if the
         *    entry is alive to put it back on the LRU.
         *
         * So rotate it before dropping the lock. If the entry is
         * written back or invalidated, the free path will unlink
         * it. For failures, rotation is the right thing as well.
         *
         * Temporary failures, where the same entry should be tried
         * again immediately, almost never happen for this shrinker.
         * We don't do any trylocking; -ENOMEM comes closest,
         * but that's extremely rare and doesn't happen spuriously
         * either. Don't bother distinguishing this case.
         */
        list_move_tail(item, &l->list);

        /*
         * Once the lru lock is dropped, the entry might get freed. The
         * swpentry is copied to the stack, and entry isn't deref'd again
         * until the entry is verified to still be alive in the tree.
         */
        swpentry = entry->swpentry;

        /*
         * It's safe to drop the lock here because we return either
         * LRU_REMOVED_RETRY or LRU_RETRY.
         */
        spin_unlock(lock);

        writeback_result = zswap_writeback_entry(entry, swpentry);

        if (writeback_result) {
                zswap_reject_reclaim_fail++;
                ret = LRU_RETRY;

                /*
                 * Encountering a page already in swap cache is a sign that
                 * we are shrinking into the warmer region. We should
                 * terminate shrinking (if we're in the dynamic shrinker
                 * context).
                 */
                if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
                        ret = LRU_STOP;
                        *encountered_page_in_swapcache = true;
                }
        } else {
                zswap_written_back_pages++;
        }

        spin_lock(lock);
        return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
                                         struct shrink_control *sc)
{
        struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
        unsigned long shrink_ret, nr_protected, lru_size;
        bool encountered_page_in_swapcache = false;

        if (!zswap_shrinker_enabled ||
            !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
                sc->nr_scanned = 0;
                return SHRINK_STOP;
        }

        nr_protected =
                atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
        lru_size = list_lru_shrink_count(&zswap_list_lru, sc);

        /*
         * Abort if we are shrinking into the protected region.
         *
         * This short-circuiting is necessary because if we have too many
         * concurrent reclaimers getting the freeable zswap object counts
         * at the same time (before any of them made reasonable progress),
         * the total number of reclaimed objects might be more than the
         * number of unprotected objects (i.e. the reclaimers will reclaim
         * into the protected area of the zswap LRU).
         */
        if (nr_protected >= lru_size - sc->nr_to_scan) {
                sc->nr_scanned = 0;
                return SHRINK_STOP;
        }

        shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
                                          &encountered_page_in_swapcache);

        if (encountered_page_in_swapcache)
                return SHRINK_STOP;

        return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct mem_cgroup *memcg = sc->memcg;
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
        unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

        if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
                return 0;

        /*
         * The shrinker resumes swap writeback, which will enter block
         * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
         * rules (may_enter_fs()), which apply on a per-folio basis.
         */
        if (!gfp_has_io_fs(sc->gfp_mask))
                return 0;

        /*
         * For memcg, use the cgroup-wide ZSWAP stats since we don't
         * have them per-node and thus per-lruvec. Careful if memcg is
         * runtime-disabled: we can get sc->memcg == NULL, which is ok
         * for the lruvec, but not for memcg_page_state().
         *
         * Without memcg, use the zswap pool-wide metrics.
         */
        if (!mem_cgroup_disabled()) {
                mem_cgroup_flush_stats(memcg);
                nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
                nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
        } else {
                nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
                nr_stored = atomic_read(&zswap_nr_stored);
        }

        if (!nr_stored)
                return 0;

        nr_protected =
                atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
        nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
        /*
         * Reduce the lru size by an estimate of the number of pages
         * that should be protected.
         */
        nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

        /*
         * Scale the number of freeable pages by the memory saving factor.
         * This ensures that the better zswap compresses memory, the fewer
         * pages we will evict to swap (as it will otherwise incur IO for
         * relatively small memory saving).
         */
        return mult_frac(nr_freeable, nr_backing, nr_stored);
}

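/*
 * Worked example for the scaling above (illustrative numbers): with
 * nr_freeable == 1000 unprotected LRU entries, nr_backing == 250 pages of
 * backing memory and nr_stored == 1000 stored pages (a 4:1 compression
 * ratio), mult_frac(1000, 250, 1000) reports only 250 freeable objects,
 * so well-compressed pools are shrunk less aggressively than poorly
 * compressed ones.
 */
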
static struct shrinker *zswap_alloc_shrinker(void)
{
        struct shrinker *shrinker;

        shrinker =
                shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
        if (!shrinker)
                return NULL;

        shrinker->scan_objects = zswap_shrinker_scan;
        shrinker->count_objects = zswap_shrinker_count;
        shrinker->batch = 0;
        shrinker->seeks = DEFAULT_SEEKS;
        return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
        int nid, shrunk = 0;

        if (!mem_cgroup_zswap_writeback_enabled(memcg))
                return -EINVAL;

        /*
         * Skip zombies because their LRUs are reparented and we would be
         * reclaiming from the parent instead of the dead memcg.
         */
        if (memcg && !mem_cgroup_online(memcg))
                return -ENOENT;

        for_each_node_state(nid, N_NORMAL_MEMORY) {
                unsigned long nr_to_walk = 1;

                shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
                                            &shrink_memcg_cb, NULL, &nr_to_walk);
        }
        return shrunk ? 0 : -EAGAIN;
}

45190f01 VW |
1411 | static void shrink_worker(struct work_struct *w) |
1412 | { | |
a65b0e76 | 1413 | struct mem_cgroup *memcg; |
e0228d59 DC |
1414 | int ret, failures = 0; |
1415 | ||
a65b0e76 | 1416 | /* global reclaim will select cgroup in a round-robin fashion. */ |
e0228d59 | 1417 | do { |
e35606e4 CZ |
1418 | spin_lock(&zswap_shrink_lock); |
1419 | zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL); | |
1420 | memcg = zswap_next_shrink; | |
a65b0e76 DC |
1421 | |
1422 | /* | |
1423 | * We need to retry if we have gone through a full round trip, or if we | |
1424 | * got an offline memcg (or else we risk undoing the effect of the | |
1425 | * zswap memcg offlining cleanup callback). This is not catastrophic | |
1426 | * per se, but it will keep the now offlined memcg hostage for a while. | |
1427 | * | |
1428 | * Note that if we got an online memcg, we will keep the extra | |
1429 | * reference in case the original reference obtained by mem_cgroup_iter | |
1430 | * is dropped by the zswap memcg offlining callback, ensuring that the | |
1431 | * memcg is not killed when we are reclaiming. | |
1432 | */ | |
1433 | if (!memcg) { | |
e35606e4 | 1434 | spin_unlock(&zswap_shrink_lock); |
a65b0e76 | 1435 | if (++failures == MAX_RECLAIM_RETRIES) |
e0228d59 | 1436 | break; |
a65b0e76 DC |
1437 | |
1438 | goto resched; | |
1439 | } | |
1440 | ||
1441 | if (!mem_cgroup_tryget_online(memcg)) { | |
1442 | /* drop the reference from mem_cgroup_iter() */ | |
1443 | mem_cgroup_iter_break(NULL, memcg); | |
e35606e4 CZ |
1444 | zswap_next_shrink = NULL; |
1445 | spin_unlock(&zswap_shrink_lock); | |
a65b0e76 | 1446 | |
e0228d59 DC |
1447 | if (++failures == MAX_RECLAIM_RETRIES) |
1448 | break; | |
a65b0e76 DC |
1449 | |
1450 | goto resched; | |
e0228d59 | 1451 | } |
e35606e4 | 1452 | spin_unlock(&zswap_shrink_lock); |
a65b0e76 DC |
1453 | |
1454 | ret = shrink_memcg(memcg); | |
1455 | /* drop the extra reference */ | |
1456 | mem_cgroup_put(memcg); | |
1457 | ||
1458 | if (ret == -EINVAL) | |
1459 | break; | |
1460 | if (ret && ++failures == MAX_RECLAIM_RETRIES) | |
1461 | break; | |
1462 | ||
1463 | resched: | |
e0228d59 DC |
1464 | cond_resched(); |
1465 | } while (!zswap_can_accept()); | |
45190f01 VW |
1466 | } |

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}
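
/*
 * For example, a zeroed page is reported as same-filled with *value == 0,
 * and a page of repeating 0xaa bytes with every word (and thus *value)
 * equal to 0xaaaa..aa. Comparing page[0] against the last word up front
 * cheaply rejects most non-uniform pages before the full scan.
 */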

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
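
/*
 * This is the decompression counterpart of the same-filled path above:
 * memset_l() replays the saved word into every long of the page, so a
 * stored value of 0 reconstructs a zero page exactly.
 */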

bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct zswap_tree *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *dupentry;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		goto shrink;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept())
			goto shrink;
		else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		unsigned long value;
		u8 *src;

		src = kmap_local_folio(folio, 0);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_local(src);
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_local(src);
	}

	if (!zswap_non_same_filled_pages_enabled)
		goto freepage;

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

insert_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;
	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		/* Account before objcg ref is moved to tree */
		count_objcg_event(objcg, ZSWPOUT);
	}

	/* map */
	spin_lock(&tree->lock);
	/*
	 * The folio may have been dirtied again, invalidate the
	 * possibly stale entry before inserting the new entry.
	 */
	if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
		zswap_invalidate_entry(tree, dupentry);
		WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
	}
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
		atomic_inc(&zswap_nr_stored);
	}
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();
	count_vm_event(ZSWPOUT);

	return true;

put_pool:
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	if (objcg)
		obj_cgroup_put(objcg);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (entry)
		zswap_invalidate_entry(tree, entry);
	spin_unlock(&tree->lock);
	return false;

shrink:
	queue_work(shrink_wq, &zswap_shrink_work);
	goto reject;
}
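
/*
 * Caller contract, as exercised from the swap writeout path: a true
 * return means the folio's contents are captured in zswap and no swap
 * IO needs to be issued; on false the caller must write the folio to
 * the backing swap device instead.
 */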

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	bool swapcache = folio_test_swapcache(folio);
	struct zswap_tree *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;
	u8 *dst;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		spin_unlock(&tree->lock);
		return false;
	}
	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		zswap_rb_erase(&tree->rbroot, entry);
	spin_unlock(&tree->lock);

	if (entry->length)
		zswap_decompress(entry, page);
	else {
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	return true;
}
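
/*
 * The folio_mark_dirty() above is what keeps the exclusive-load scheme
 * safe: once the compressed copy is freed, the swapcache page is the
 * only copy left, and marking it dirty guarantees it will be written
 * out again rather than discarded as clean.
 */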

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct zswap_tree *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (entry)
		zswap_invalidate_entry(tree, entry);
	spin_unlock(&tree->lock);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct zswap_tree *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++) {
		tree = trees + i;
		tree->rbroot = RB_ROOT;
		spin_lock_init(&tree->lock);
	}

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
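
/*
 * Sizing example, assuming SWAP_ADDRESS_SPACE_PAGES == 1 << 14: a 4 GiB
 * swap device with 4 KiB pages has 1048576 slots, so zswap_swapon()
 * allocates DIV_ROUND_UP(1048576, 16384) == 64 trees, each covering a
 * 64 MiB range of the device with its own rbtree and lock.
 */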

void zswap_swapoff(int type)
{
	struct zswap_tree *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
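
/*
 * With CONFIG_DEBUG_FS enabled (and debugfs mounted at its usual
 * location), the counters above appear as read-only files under
 * /sys/kernel/debug/zswap/, e.g. pool_total_size and written_back_pages.
 */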

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpools[0]));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}
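
/*
 * The error labels above unwind in reverse order of setup: each goto
 * target undoes only what was initialized before the failing step, so a
 * partial setup never leaks the entry cache, cpuhp state, workqueue or
 * shrinker.
 */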

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Compressed cache for swap pages");