// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool is_zswap_enabled(void)
{
	return zswap_enabled;
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression. For a same-value filled page, length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

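/*
 * For example, with SWAP_ADDRESS_SPACE_SHIFT == 14 (its current value in
 * swap.h), each xarray covers 2^14 swap slots, i.e. 64MB of swap space
 * with 4KB pages. A swap entry on device 0 with offset 0x12345 is looked
 * up in zswap_trees[0][0x12345 >> 14] == zswap_trees[0][4], and is
 * indexed within that tree by the full offset.
 */
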
#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

491 | { | |
492 | struct zswap_pool *pool; | |
4196b48d | 493 | unsigned long total = 0; |
91cdcd8d JW |
494 | |
495 | rcu_read_lock(); | |
496 | list_for_each_entry_rcu(pool, &zswap_pools, list) { | |
497 | int i; | |
498 | ||
499 | for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) | |
4196b48d | 500 | total += zpool_get_total_pages(pool->zpools[i]); |
91cdcd8d JW |
501 | } |
502 | rcu_read_unlock(); | |
503 | ||
4196b48d | 504 | return total; |
91cdcd8d JW |
505 | } |
506 | ||
82e0f8e4 YA |
507 | static bool zswap_check_limits(void) |
508 | { | |
509 | unsigned long cur_pages = zswap_total_pages(); | |
510 | unsigned long max_pages = zswap_max_pages(); | |
511 | ||
512 | if (cur_pages >= max_pages) { | |
513 | zswap_pool_limit_hit++; | |
514 | zswap_pool_reached_full = true; | |
515 | } else if (zswap_pool_reached_full && | |
516 | cur_pages <= zswap_accept_thr_pages()) { | |
517 | zswap_pool_reached_full = false; | |
518 | } | |
519 | return zswap_pool_reached_full; | |
520 | } | |
521 | ||
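/*
 * Note the hysteresis in zswap_check_limits(): once the limit is hit,
 * stores keep failing until usage falls back below the acceptance
 * threshold, not merely below the maximum. This keeps the pool from
 * oscillating right at the limit while the shrink worker catches up.
 */
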
/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

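/*
 * For example, writing "zstd" to /sys/module/zswap/parameters/compressor
 * lands in zswap_compressor_param_set() below, which calls
 * __zswap_param_set() with compressor == NULL so that the new string is
 * validated with crypto_has_acomp() and paired with the current zpool
 * type, reusing an existing matching pool if one is still on the list.
 */
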
static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}

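/*
 * Example of the decay above: with lru_size == 1000, the protection size
 * is halved as soon as it exceeds a quarter of the LRU, so an increment
 * that lands on old == 260 is replaced by new == 130. Under a steady
 * stream of stores, at most roughly a quarter of the LRU stays protected.
 */
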
static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg)
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;

	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	return entry->pool->zpools[hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS))];
}

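/*
 * hash_ptr() above spreads entries across the pool's zpools based on the
 * entry's address: ilog2(ZSWAP_NR_ZPOOLS) == 5, so the hash yields an
 * index in [0, 31]. Concurrent stores thus mostly land in different
 * zpools and contend less on any single zpool's internal locks.
 */
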
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&zswap_list_lru, entry);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}

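/*
 * zswap_entry_free() is the common teardown path: in this file it is
 * reached from zswap_writeback_entry() once the compressed copy has been
 * written back, and from zswap_store() when a stale entry for a
 * redirtied folio is replaced.
 */
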
/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid a buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little odd that we send an asynchronous request and
	 * then wait for its completion synchronously, making the process
	 * synchronous in fact.
	 * Theoretically, acomp supports sending multiple requests to one
	 * acomp instance and having them completed simultaneously. But in
	 * this case zswap stores and loads page by page; there is no
	 * existing method to send the second page before the first page is
	 * done in one thread doing zswap.
	 * But in different threads running on different cpus, we have
	 * different acomp instances, so multiple threads can do
	 * (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = zswap_find_zpool(entry);
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

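/*
 * A note on the locking in zswap_compress(): each CPU has its own
 * acomp_ctx (transform, request and 2*PAGE_SIZE buffer), so compressions
 * on different CPUs run in parallel. raw_cpu_ptr() only picks a context;
 * the mutex is what serializes its use, and the context stays valid even
 * if the task migrates or sleeps while holding the mutex.
 */
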
static void zswap_decompress(struct zswap_entry *entry, struct page *page)
{
	struct zpool *zpool = zswap_find_zpool(entry);
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle() is atomic, we cannot reliably utilize its mapped
	 * buffer for crypto_acomp_decompress(), which might sleep. In such cases,
	 * we must resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one() is only equipped to handle linearly mapped low
	 * memory. In such cases, we also must copy the buffer to a temporary,
	 * lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, &folio->page);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

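/*
 * The xa_cmpxchg() in zswap_writeback_entry() doubles as lookup and
 * removal: the entry is unlinked only if the slot still holds this exact
 * pointer, so a slot that was invalidated and recycled after the entry
 * left the LRU is detected by pointer inequality and the stale data is
 * never written out.
 */
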
/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	spin_lock(lock);
	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at the
	 * same time (before any of them made reasonable progress), the total
	 * number of reclaimed objects might be more than the number of unprotected
	 * objects (i.e. the reclaimers will reclaim into the protected area of the
	 * zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
					  &encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	/*
	 * Subtract the lru size by an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 *
	 * The memory saving factor calculated here takes same-filled pages into
	 * account, but those are not freeable since they almost occupy no
	 * space. Hence, we may scale nr_freeable down a little bit more than we
	 * should if we have a lot of same-filled pages.
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

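/*
 * Example of the scaling in zswap_shrinker_count(): with 1000 stored
 * pages compressed into 250 backing pages (4:1) and 400 freeable LRU
 * objects, mult_frac(400, 250, 1000) == 100, i.e. evicting all 400
 * objects would only free about 100 pages of RAM, so only 100 are
 * reported to the shrinker core.
 */
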
static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -EINVAL;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_shrink_lock);
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		memcg = zswap_next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_shrink_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			zswap_next_shrink = NULL;
			spin_unlock(&zswap_shrink_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_shrink_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

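/*
 * In effect, shrink_worker() walks the memcg tree round-robin, reclaiming
 * at most one LRU object per NUMA node per iteration (nr_to_walk == 1 in
 * shrink_memcg()), until the pool drops below the acceptance threshold
 * or MAX_RECLAIM_RETRIES consecutive failures accumulate.
 */
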
/*********************************
* same-filled functions
**********************************/
static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
	bool ret = false;

	page = kmap_local_folio(folio, 0);
	val = page[0];

	if (val != page[last_pos])
		goto out;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			goto out;
	}

	*value = val;
	ret = true;
out:
	kunmap_local(page);
	return ret;
}

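/*
 * The zero page is the common case of the check above: every word is 0,
 * so the scan succeeds with *value == 0 and the page is stored as a
 * length == 0 entry, costing only the zswap_entry metadata instead of a
 * zpool allocation. On load, zswap_fill_page() below reconstructs the
 * page from the stored word.
 */
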
static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_is_folio_same_filled(folio, &value)) {
		entry->length = 0;
		entry->value = value;
		atomic_inc(&zswap_same_filled_pages);
		goto store_entry;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in the xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
f999f38b | 1516 | } |
2b281117 SJ |
1517 | |
1518 | /* update stats */ | |
1519 | atomic_inc(&zswap_stored_pages); | |
f6498b77 | 1520 | count_vm_event(ZSWPOUT); |
2b281117 | 1521 | |
42c06a0e | 1522 | return true; |
2b281117 | 1523 | |
796c2c23 CL |
1524 | store_failed: |
1525 | if (!entry->length) | |
1526 | atomic_dec(&zswap_same_filled_pages); | |
1527 | else { | |
1528 | zpool_free(zswap_find_zpool(entry), entry->handle); | |
a65b0e76 | 1529 | put_pool: |
796c2c23 CL |
1530 | zswap_pool_put(entry->pool); |
1531 | } | |
f1c54846 | 1532 | freepage: |
2b281117 SJ |
1533 | zswap_entry_cache_free(entry); |
1534 | reject: | |
91b71e78 | 1535 | obj_cgroup_put(objcg); |
4ea3fa9d YA |
1536 | if (zswap_pool_reached_full) |
1537 | queue_work(shrink_wq, &zswap_shrink_work); | |
f576a1e8 CZ |
1538 | check_old: |
1539 | /* | |
1540 | * If the zswap store fails or zswap is disabled, we must invalidate the | |
1541 | * possibly stale entry which was previously stored at this offset. | |
1542 | * Otherwise, writeback could overwrite the new data in the swapfile. | |
1543 | */ | |
796c2c23 | 1544 | entry = xa_erase(tree, offset); |
f576a1e8 | 1545 | if (entry) |
796c2c23 | 1546 | zswap_entry_free(entry); |
42c06a0e | 1547 | return false; |
2b281117 SJ |
1548 | } |
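
/*
 * Caller-side sketch (editorial; the authoritative caller lives in
 * mm/page_io.c): the swap writeback path tries zswap before issuing
 * real I/O, roughly:
 *
 *	if (zswap_store(folio)) {
 *		folio_unlock(folio);
 *		return 0;
 *	}
 *
 * and otherwise submits the folio to the swap device.
 */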

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;
	u8 *dst;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	if (entry->length)
		zswap_decompress(entry, page);
	else {
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	return true;
}
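
/*
 * Caller-side sketch (editorial): the swap-in path in mm/page_io.c is
 * expected to consult zswap before reading from the swap device, roughly:
 *
 *	if (zswap_load(folio)) {
 *		folio_mark_uptodate(folio);
 *		folio_unlock(folio);
 *	}
 *
 * and only falls back to a device read when zswap_load() returns false.
 */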

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}
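
/*
 * Editorial note: this is called when a swap entry is freed (see the swap
 * slot freeing path in mm/swapfile.c), so that a compressed copy never
 * outlives its swap entry and the slot can be reused safely.
 */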

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
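
/*
 * Editorial example (arithmetic sketch, assuming 4 KiB pages and
 * SWAP_ADDRESS_SPACE_PAGES == 16384): one xarray covers 64 MiB of swap,
 * so a 64 GiB swap device gets
 *
 *	nr = DIV_ROUND_UP(16777216, 16384) = 1024
 *
 * trees, which spreads xarray lock contention across the offset space.
 */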

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
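
/*
 * Editorial example: with CONFIG_DEBUG_FS enabled and debugfs mounted at
 * its conventional location, the counters above can be inspected directly:
 *
 *	# cat /sys/kernel/debug/zswap/pool_total_size
 *	# cat /sys/kernel/debug/zswap/stored_pages
 *
 * They are diagnostic only; all of these files are created read-only (0444).
 */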

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpools[0]));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);
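
/*
 * Usage note (editorial; see Documentation/admin-guide/mm/zswap.rst): zswap
 * can be enabled at boot with the zswap.enabled=1 kernel parameter, or at
 * runtime via:
 *
 *	echo Y > /sys/module/zswap/parameters/enabled
 *
 * provided a compressor and zpool could be initialized by zswap_setup().
 */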

MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Compressed cache for swap pages");