// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <[email protected]>
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
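
/*
 * Illustrative usage sketch, assuming a kernel with zstd and zsmalloc
 * available: because the three params above are registered with mode
 * 0644, they are writable at runtime through sysfs, e.g.
 *
 *	echo Y        > /sys/module/zswap/parameters/enabled
 *	echo zstd     > /sys/module/zswap/parameters/compressor
 *	echo zsmalloc > /sys/module/zswap/parameters/zpool
 *
 * Changing compressor/zpool goes through the param callbacks defined
 * later in this file, which create (or revive) a matching pool.
 */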

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.  For a same-value filled page, length is 0, and
 *          both pool and lru are invalid and must be ignored.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled page
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}
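
/*
 * Worked example, assuming SWAP_ADDRESS_SPACE_SHIFT is 14 as defined in
 * mm/swap.h: each xarray then covers 16384 swap slots (64 MiB of 4K
 * pages), so swap offsets 0..16383 of a given type map to tree 0,
 * 16384..32767 to tree 1, and so on. This mirrors the swap cache
 * address-space split and keeps per-tree contention bounded.
 */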

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}
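
/*
 * Worked example with the default tunables above: on a 16 GiB machine
 * (~4194304 pages of 4K), zswap_max_pages() is 20% of that, ~838860
 * pages (~3.2 GiB), and zswap_accept_thr_pages() is 90% of the limit,
 * ~754974 pages (~2.9 GiB). After the limit is hit, stores are
 * rejected until the pool falls back below the acceptance threshold.
 */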

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * of the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&zswap_list_lru, entry);
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_folio(&input, folio, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid a buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously; the process is in fact
	 * synchronous. Theoretically, acomp supports users sending multiple
	 * acomp requests in one acomp instance and then getting those requests
	 * done simultaneously, but in this case zswap actually stores and
	 * loads page by page: there is no existing method to send the second
	 * page before the first page is done in one thread doing zswap.
	 * But in different threads running on different cpus, we have
	 * different acomp instances, so multiple threads can do
	 * (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = entry->pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
	 * resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the
 *    LRU in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	spin_lock(lock);
	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that are recently swapped
	 * in from disk. The idea is that had we protected zswap's LRU by this
	 * amount of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 *
	 * The memory saving factor calculated here takes same-filled pages into
	 * account, but those are not freeable since they almost occupy no
	 * space. Hence, we may scale nr_freeable down a little bit more than we
	 * should if we have a lot of same-filled pages.
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
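
/*
 * Worked example for the scaling above: with nr_freeable == 1000 LRU
 * entries, nr_backing == 250 and nr_stored == 1000 (a 4:1 compression
 * ratio), the shrinker reports mult_frac(1000, 250, 1000) == 250
 * freeable objects; the better the ratio, the less writeback is worth
 * doing.
 */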

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save the iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* same-filled functions
**********************************/
static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
{
	unsigned long *data;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
	bool ret = false;

	data = kmap_local_folio(folio, 0);
	val = data[0];

	if (val != data[last_pos])
		goto out;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != data[pos])
			goto out;
	}

	*value = val;
	ret = true;
out:
	kunmap_local(data);
	return ret;
}
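
/*
 * Example: a page whose bytes are all 0xAA is detected as same-filled;
 * on a 64-bit machine the stored value is 0xaaaaaaaaaaaaaaaa and
 * entry->length stays 0, so no zpool space is consumed and loads are
 * served by zswap_fill_folio() below.
 */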

static void zswap_fill_folio(struct folio *folio, unsigned long value)
{
	unsigned long *data = kmap_local_folio(folio, 0);

	memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
	kunmap_local(data);
}

/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_is_folio_same_filled(folio, &value)) {
		entry->length = 0;
		entry->value = value;
		atomic_inc(&zswap_same_filled_pages);
		goto store_entry;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;
	entry->referenced = true;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	count_vm_event(ZSWPOUT);

	return true;

store_failed:
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
put_pool:
		zswap_pool_put(entry->pool);
	}
freepage:
	zswap_entry_cache_free(entry);
reject:
	obj_cgroup_put(objcg);
	if (zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
	return false;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	if (entry->length)
		zswap_decompress(entry, folio);
	else
		zswap_fill_folio(folio, entry->value);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
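
/*
 * Worked example, assuming SWAP_ADDRESS_SPACE_PAGES is 16384: swapon of
 * an 8 GiB device (nr_pages == 2097152) allocates
 * DIV_ROUND_UP(2097152, 16384) == 128 xarrays for that swap type.
 */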

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
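
/*
 * Illustrative sketch, assuming CONFIG_DEBUG_FS and a debugfs mount at
 * /sys/kernel/debug: the counters registered above can be read with e.g.
 *
 *	cat /sys/kernel/debug/zswap/pool_total_size
 *	cat /sys/kernel/debug/zswap/stored_pages
 *
 * and dividing the first by the second gives the average compressed
 * size per stored page.
 */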
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Compressed cache for swap pages");