c942fddf 1// SPDX-License-Identifier: GPL-2.0-or-later
2b281117
SJ
2/*
3 * zswap.c - zswap driver file
4 *
42c06a0e 5 * zswap is a cache that takes pages that are in the process
2b281117
SJ
6 * of being swapped out and attempts to compress and store them in a
7 * RAM-based memory pool. This can result in a significant I/O reduction on
8 * the swap device and, in the case where decompressing from RAM is faster
9 * than reading from the swap device, can also improve workload performance.
10 *
11 * Copyright (C) 2012 Seth Jennings <[email protected]>
2b281117
SJ
12*/
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/cpu.h>
18#include <linux/highmem.h>
19#include <linux/slab.h>
20#include <linux/spinlock.h>
21#include <linux/types.h>
22#include <linux/atomic.h>
2b281117
SJ
23#include <linux/rbtree.h>
24#include <linux/swap.h>
25#include <linux/crypto.h>
1ec3b5fe 26#include <linux/scatterlist.h>
ddc1a5cb 27#include <linux/mempolicy.h>
2b281117 28#include <linux/mempool.h>
12d79d64 29#include <linux/zpool.h>
1ec3b5fe 30#include <crypto/acompress.h>
42c06a0e 31#include <linux/zswap.h>
2b281117
SJ
32#include <linux/mm_types.h>
33#include <linux/page-flags.h>
34#include <linux/swapops.h>
35#include <linux/writeback.h>
36#include <linux/pagemap.h>
45190f01 37#include <linux/workqueue.h>
a65b0e76 38#include <linux/list_lru.h>
2b281117 39
014bb1de 40#include "swap.h"
e0228d59 41#include "internal.h"
014bb1de 42
2b281117
SJ
43/*********************************
44* statistics
45**********************************/
12d79d64 46/* Total bytes used by the compressed storage */
f6498b77 47u64 zswap_pool_total_size;
2b281117 48/* The number of compressed pages currently stored in zswap */
f6498b77 49atomic_t zswap_stored_pages = ATOMIC_INIT(0);
a85f878b
SD
50/* The number of same-value filled pages currently stored in zswap */
51static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
2b281117
SJ
52
53/*
54 * The statistics below are not protected from concurrent access for
 55 * performance reasons, so they may not be 100% accurate. However,
56 * they do provide useful information on roughly how many times a
57 * certain event is occurring.
58*/
59
60/* Pool limit was hit (see zswap_max_pool_percent) */
61static u64 zswap_pool_limit_hit;
62/* Pages written back when pool limit was reached */
63static u64 zswap_written_back_pages;
64/* Store failed due to a reclaim failure after pool limit was reached */
65static u64 zswap_reject_reclaim_fail;
cb61dad8
NP
66/* Store failed due to compression algorithm failure */
67static u64 zswap_reject_compress_fail;
2b281117
SJ
68/* Compressed page was too big for the allocator to (optimally) store */
69static u64 zswap_reject_compress_poor;
70/* Store failed because underlying allocator could not get memory */
71static u64 zswap_reject_alloc_fail;
72/* Store failed because the entry metadata could not be allocated (rare) */
73static u64 zswap_reject_kmemcache_fail;
2b281117 74
45190f01
VW
75/* Shrinker work queue */
76static struct workqueue_struct *shrink_wq;
77/* Pool limit was hit, we need to calm down */
78static bool zswap_pool_reached_full;
79
2b281117
SJ
80/*********************************
81* tunables
82**********************************/
c00ed16a 83
bae21db8
DS
84#define ZSWAP_PARAM_UNSET ""
85
141fdeec
LS
86static int zswap_setup(void);
87
bb8b93b5
MS
88/* Enable/disable zswap */
89static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
d7b028f5
DS
90static int zswap_enabled_param_set(const char *,
91 const struct kernel_param *);
83aed6cd 92static const struct kernel_param_ops zswap_enabled_param_ops = {
d7b028f5
DS
93 .set = zswap_enabled_param_set,
94 .get = param_get_bool,
95};
96module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
2b281117 97
90b0fc26 98/* Crypto compressor to use */
bb8b93b5 99static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
90b0fc26
DS
100static int zswap_compressor_param_set(const char *,
101 const struct kernel_param *);
83aed6cd 102static const struct kernel_param_ops zswap_compressor_param_ops = {
90b0fc26 103 .set = zswap_compressor_param_set,
c99b42c3
DS
104 .get = param_get_charp,
105 .free = param_free_charp,
90b0fc26
DS
106};
107module_param_cb(compressor, &zswap_compressor_param_ops,
c99b42c3 108 &zswap_compressor, 0644);
2b281117 109
90b0fc26 110/* Compressed storage zpool to use */
bb8b93b5 111static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
90b0fc26 112static int zswap_zpool_param_set(const char *, const struct kernel_param *);
83aed6cd 113static const struct kernel_param_ops zswap_zpool_param_ops = {
c99b42c3
DS
114 .set = zswap_zpool_param_set,
115 .get = param_get_charp,
116 .free = param_free_charp,
90b0fc26 117};
c99b42c3 118module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
12d79d64 119
90b0fc26
DS
120/* The maximum percentage of memory that the compressed pool can occupy */
121static unsigned int zswap_max_pool_percent = 20;
122module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
60105e12 123
45190f01
VW
124/* The threshold for accepting new pages after the max_pool_percent was hit */
125static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
126module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
127 uint, 0644);
128
cb325ddd
MS
129/*
130 * Enable/disable handling same-value filled pages (enabled by default).
 131 * If disabled, every page is considered non-same-value filled.
132 */
a85f878b
SD
133static bool zswap_same_filled_pages_enabled = true;
134module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
135 bool, 0644);
136
cb325ddd
MS
137/* Enable/disable handling non-same-value filled pages (enabled by default) */
138static bool zswap_non_same_filled_pages_enabled = true;
139module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
140 bool, 0644);
141
b8cf32dc
YA
142/* Number of zpools in zswap_pool (empirically determined for scalability) */
143#define ZSWAP_NR_ZPOOLS 32
144
b5ba474f
NP
145/* Enable/disable memory pressure-based shrinker. */
146static bool zswap_shrinker_enabled = IS_ENABLED(
147 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
148module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
149
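/*
 * Example usage of the tunables above (a sketch; the paths follow the
 * standard /sys/module/<name>/parameters/ layout and the values are
 * only illustrative):
 *
 *   echo Y    > /sys/module/zswap/parameters/enabled
 *   echo zstd > /sys/module/zswap/parameters/compressor
 *   echo 25   > /sys/module/zswap/parameters/max_pool_percent
 *
 * or on the kernel command line: zswap.enabled=1 zswap.compressor=zstd
 */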
501a06fe
NP
150bool is_zswap_enabled(void)
151{
152 return zswap_enabled;
153}
154
2b281117 155/*********************************
f1c54846 156* data structures
2b281117 157**********************************/
2b281117 158
1ec3b5fe
BS
159struct crypto_acomp_ctx {
160 struct crypto_acomp *acomp;
161 struct acomp_req *req;
162 struct crypto_wait wait;
8ba2f844
CZ
163 u8 *buffer;
164 struct mutex mutex;
1ec3b5fe
BS
165};
166
f999f38b
DC
167/*
168 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
169 * The only case where lru_lock is not acquired while holding tree.lock is
 170 * when a zswap_entry is taken off the lru for writeback; in that case it
171 * needs to be verified that it's still valid in the tree.
172 */
f1c54846 173struct zswap_pool {
b8cf32dc 174 struct zpool *zpools[ZSWAP_NR_ZPOOLS];
1ec3b5fe 175 struct crypto_acomp_ctx __percpu *acomp_ctx;
f1c54846
DS
176 struct kref kref;
177 struct list_head list;
45190f01
VW
178 struct work_struct release_work;
179 struct work_struct shrink_work;
cab7a7e5 180 struct hlist_node node;
f1c54846 181 char tfm_name[CRYPTO_MAX_ALG_NAME];
a65b0e76
DC
182 struct list_lru list_lru;
183 struct mem_cgroup *next_shrink;
b5ba474f
NP
184 struct shrinker *shrinker;
185 atomic_t nr_stored;
2b281117
SJ
186};
187
2b281117
SJ
188/*
189 * struct zswap_entry
190 *
191 * This structure contains the metadata for tracking a single compressed
192 * page within zswap.
193 *
194 * rbnode - links the entry into red-black tree for the appropriate swap type
97157d89 195 * swpentry - associated swap entry, the offset indexes into the red-black tree
2b281117 196 * length - the length in bytes of the compressed page data. Needed during
f999f38b
DC
197 * decompression. For a same value filled page length is 0, and both
198 * pool and lru are invalid and must be ignored.
f1c54846
DS
199 * pool - the zswap_pool the entry's data is in
200 * handle - zpool allocation handle that stores the compressed page data
a85f878b 201 * value - value of the same-value filled pages which have same content
97157d89 202 * objcg - the obj_cgroup that the compressed memory is charged to
f999f38b 203 * lru - handle to the pool's lru used to evict pages.
2b281117
SJ
204 */
205struct zswap_entry {
206 struct rb_node rbnode;
0bb48849 207 swp_entry_t swpentry;
2b281117 208 unsigned int length;
f1c54846 209 struct zswap_pool *pool;
a85f878b
SD
210 union {
211 unsigned long handle;
212 unsigned long value;
213 };
f4840ccf 214 struct obj_cgroup *objcg;
f999f38b 215 struct list_head lru;
2b281117
SJ
216};
217
2b281117
SJ
218struct zswap_tree {
219 struct rb_root rbroot;
220 spinlock_t lock;
2b281117
SJ
221};
222
223static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
44c7c734 224static unsigned int nr_zswap_trees[MAX_SWAPFILES];
2b281117 225
f1c54846
DS
226/* RCU-protected iteration */
227static LIST_HEAD(zswap_pools);
228/* protects zswap_pools list modification */
229static DEFINE_SPINLOCK(zswap_pools_lock);
32a4e169
DS
230/* pool counter to provide unique names to zpool */
231static atomic_t zswap_pools_count = ATOMIC_INIT(0);
f1c54846 232
9021ccec
LS
233enum zswap_init_type {
234 ZSWAP_UNINIT,
235 ZSWAP_INIT_SUCCEED,
236 ZSWAP_INIT_FAILED
237};
90b0fc26 238
9021ccec 239static enum zswap_init_type zswap_init_state;
90b0fc26 240
141fdeec
LS
241/* used to ensure the integrity of initialization */
242static DEFINE_MUTEX(zswap_init_lock);
d7b028f5 243
ae3d89a7
DS
244/* init completed, but couldn't create the initial pool */
245static bool zswap_has_pool;
246
f1c54846
DS
247/*********************************
248* helpers and fwd declarations
249**********************************/
250
44c7c734
CZ
251static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
252{
253 return &zswap_trees[swp_type(swp)][swp_offset(swp)
254 >> SWAP_ADDRESS_SPACE_SHIFT];
255}
256
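/*
 * Example, assuming the usual SWAP_ADDRESS_SPACE_SHIFT of 14: each
 * zswap_tree covers 2^14 = 16384 swap slots (64 MiB of swap with 4 KiB
 * pages), so a swap entry of type 1 with offset 40000 maps to
 * &zswap_trees[1][40000 >> 14] == &zswap_trees[1][2].
 */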
f1c54846
DS
257#define zswap_pool_debug(msg, p) \
258 pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
b8cf32dc 259 zpool_get_type((p)->zpools[0]))
f1c54846 260
f1c54846
DS
261static bool zswap_is_full(void)
262{
ca79b0c2
AK
263 return totalram_pages() * zswap_max_pool_percent / 100 <
264 DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
f1c54846
DS
265}
266
45190f01
VW
267static bool zswap_can_accept(void)
268{
269 return totalram_pages() * zswap_accept_thr_percent / 100 *
270 zswap_max_pool_percent / 100 >
271 DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
272}
273
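/*
 * Worked example for the two checks above, assuming 4 KiB pages and the
 * default tunables: on a 4 GiB machine totalram_pages() is about 1048576,
 * so with max_pool_percent = 20 the pool is "full" once it holds more
 * than ~209715 compressed pages (~819 MiB). With accept_thr_percent = 90,
 * new stores are then refused until the pool shrinks back below
 * ~188743 pages (~737 MiB).
 */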
b5ba474f
NP
274static u64 get_zswap_pool_size(struct zswap_pool *pool)
275{
276 u64 pool_size = 0;
277 int i;
278
279 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
280 pool_size += zpool_get_total_size(pool->zpools[i]);
281
282 return pool_size;
283}
284
f1c54846
DS
285static void zswap_update_total_size(void)
286{
287 struct zswap_pool *pool;
288 u64 total = 0;
289
290 rcu_read_lock();
291
292 list_for_each_entry_rcu(pool, &zswap_pools, list)
b5ba474f 293 total += get_zswap_pool_size(pool);
f1c54846
DS
294
295 rcu_read_unlock();
296
297 zswap_pool_total_size = total;
298}
299
a984649b
JW
300/*********************************
301* pool functions
302**********************************/
303
304static void zswap_alloc_shrinker(struct zswap_pool *pool);
305static void shrink_worker(struct work_struct *w);
306
307static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
308{
309 int i;
310 struct zswap_pool *pool;
311 char name[38]; /* 'zswap' + 32 char (max) num + \0 */
312 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
313 int ret;
314
315 if (!zswap_has_pool) {
 316 /* if either is unset, pool initialization failed, and we
317 * need both params to be set correctly before trying to
318 * create a pool.
319 */
320 if (!strcmp(type, ZSWAP_PARAM_UNSET))
321 return NULL;
322 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
323 return NULL;
324 }
325
326 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
327 if (!pool)
328 return NULL;
329
330 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
331 /* unique name for each pool specifically required by zsmalloc */
332 snprintf(name, 38, "zswap%x",
333 atomic_inc_return(&zswap_pools_count));
334
335 pool->zpools[i] = zpool_create_pool(type, name, gfp);
336 if (!pool->zpools[i]) {
337 pr_err("%s zpool not available\n", type);
338 goto error;
339 }
340 }
341 pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
342
343 strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
344
345 pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
346 if (!pool->acomp_ctx) {
347 pr_err("percpu alloc failed\n");
348 goto error;
349 }
350
351 ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
352 &pool->node);
353 if (ret)
354 goto error;
355
356 zswap_alloc_shrinker(pool);
357 if (!pool->shrinker)
358 goto error;
359
360 pr_debug("using %s compressor\n", pool->tfm_name);
361
362 /* being the current pool takes 1 ref; this func expects the
363 * caller to always add the new pool as the current pool
364 */
365 kref_init(&pool->kref);
366 INIT_LIST_HEAD(&pool->list);
367 if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
368 goto lru_fail;
369 shrinker_register(pool->shrinker);
370 INIT_WORK(&pool->shrink_work, shrink_worker);
371 atomic_set(&pool->nr_stored, 0);
372
373 zswap_pool_debug("created", pool);
374
375 return pool;
376
377lru_fail:
378 list_lru_destroy(&pool->list_lru);
379 shrinker_free(pool->shrinker);
380error:
381 if (pool->acomp_ctx)
382 free_percpu(pool->acomp_ctx);
383 while (i--)
384 zpool_destroy_pool(pool->zpools[i]);
385 kfree(pool);
386 return NULL;
387}
388
389static struct zswap_pool *__zswap_pool_create_fallback(void)
390{
391 bool has_comp, has_zpool;
392
393 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
394 if (!has_comp && strcmp(zswap_compressor,
395 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
396 pr_err("compressor %s not available, using default %s\n",
397 zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
398 param_free_charp(&zswap_compressor);
399 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
400 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
401 }
402 if (!has_comp) {
403 pr_err("default compressor %s not available\n",
404 zswap_compressor);
405 param_free_charp(&zswap_compressor);
406 zswap_compressor = ZSWAP_PARAM_UNSET;
407 }
408
409 has_zpool = zpool_has_pool(zswap_zpool_type);
410 if (!has_zpool && strcmp(zswap_zpool_type,
411 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
412 pr_err("zpool %s not available, using default %s\n",
413 zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
414 param_free_charp(&zswap_zpool_type);
415 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
416 has_zpool = zpool_has_pool(zswap_zpool_type);
417 }
418 if (!has_zpool) {
419 pr_err("default zpool %s not available\n",
420 zswap_zpool_type);
421 param_free_charp(&zswap_zpool_type);
422 zswap_zpool_type = ZSWAP_PARAM_UNSET;
423 }
424
425 if (!has_comp || !has_zpool)
426 return NULL;
427
428 return zswap_pool_create(zswap_zpool_type, zswap_compressor);
429}
430
431static void zswap_pool_destroy(struct zswap_pool *pool)
432{
433 int i;
434
435 zswap_pool_debug("destroying", pool);
436
437 shrinker_free(pool->shrinker);
438 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
439 free_percpu(pool->acomp_ctx);
440 list_lru_destroy(&pool->list_lru);
441
442 spin_lock(&zswap_pools_lock);
443 mem_cgroup_iter_break(NULL, pool->next_shrink);
444 pool->next_shrink = NULL;
445 spin_unlock(&zswap_pools_lock);
446
447 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
448 zpool_destroy_pool(pool->zpools[i]);
449 kfree(pool);
450}
451
39f3ec8e
JW
452static void __zswap_pool_release(struct work_struct *work)
453{
454 struct zswap_pool *pool = container_of(work, typeof(*pool),
455 release_work);
456
457 synchronize_rcu();
458
459 /* nobody should have been able to get a kref... */
460 WARN_ON(kref_get_unless_zero(&pool->kref));
461
462 /* pool is now off zswap_pools list and has no references. */
463 zswap_pool_destroy(pool);
464}
465
466static struct zswap_pool *zswap_pool_current(void);
467
468static void __zswap_pool_empty(struct kref *kref)
469{
470 struct zswap_pool *pool;
471
472 pool = container_of(kref, typeof(*pool), kref);
473
474 spin_lock(&zswap_pools_lock);
475
476 WARN_ON(pool == zswap_pool_current());
477
478 list_del_rcu(&pool->list);
479
480 INIT_WORK(&pool->release_work, __zswap_pool_release);
481 schedule_work(&pool->release_work);
482
483 spin_unlock(&zswap_pools_lock);
484}
485
486static int __must_check zswap_pool_get(struct zswap_pool *pool)
487{
488 if (!pool)
489 return 0;
490
491 return kref_get_unless_zero(&pool->kref);
492}
493
494static void zswap_pool_put(struct zswap_pool *pool)
495{
496 kref_put(&pool->kref, __zswap_pool_empty);
497}
498
c1a0ecb8
JW
499static struct zswap_pool *__zswap_pool_current(void)
500{
501 struct zswap_pool *pool;
502
503 pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
504 WARN_ONCE(!pool && zswap_has_pool,
505 "%s: no page storage pool!\n", __func__);
506
507 return pool;
508}
509
510static struct zswap_pool *zswap_pool_current(void)
511{
512 assert_spin_locked(&zswap_pools_lock);
513
514 return __zswap_pool_current();
515}
516
517static struct zswap_pool *zswap_pool_current_get(void)
518{
519 struct zswap_pool *pool;
520
521 rcu_read_lock();
522
523 pool = __zswap_pool_current();
524 if (!zswap_pool_get(pool))
525 pool = NULL;
526
527 rcu_read_unlock();
528
529 return pool;
530}
531
532static struct zswap_pool *zswap_pool_last_get(void)
533{
534 struct zswap_pool *pool, *last = NULL;
535
536 rcu_read_lock();
537
538 list_for_each_entry_rcu(pool, &zswap_pools, list)
539 last = pool;
540 WARN_ONCE(!last && zswap_has_pool,
541 "%s: no page storage pool!\n", __func__);
542 if (!zswap_pool_get(last))
543 last = NULL;
544
545 rcu_read_unlock();
546
547 return last;
548}
549
550/* type and compressor must be null-terminated */
551static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
552{
553 struct zswap_pool *pool;
554
555 assert_spin_locked(&zswap_pools_lock);
556
557 list_for_each_entry_rcu(pool, &zswap_pools, list) {
558 if (strcmp(pool->tfm_name, compressor))
559 continue;
560 /* all zpools share the same type */
561 if (strcmp(zpool_get_type(pool->zpools[0]), type))
562 continue;
563 /* if we can't get it, it's about to be destroyed */
564 if (!zswap_pool_get(pool))
565 continue;
566 return pool;
567 }
568
569 return NULL;
570}
571
abca07c0
JW
572/*********************************
573* param callbacks
574**********************************/
575
576static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
577{
578 /* no change required */
579 if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
580 return false;
581 return true;
582}
583
584/* val must be a null-terminated string */
585static int __zswap_param_set(const char *val, const struct kernel_param *kp,
586 char *type, char *compressor)
587{
588 struct zswap_pool *pool, *put_pool = NULL;
589 char *s = strstrip((char *)val);
590 int ret = 0;
591 bool new_pool = false;
592
593 mutex_lock(&zswap_init_lock);
594 switch (zswap_init_state) {
595 case ZSWAP_UNINIT:
596 /* if this is load-time (pre-init) param setting,
597 * don't create a pool; that's done during init.
598 */
599 ret = param_set_charp(s, kp);
600 break;
601 case ZSWAP_INIT_SUCCEED:
602 new_pool = zswap_pool_changed(s, kp);
603 break;
604 case ZSWAP_INIT_FAILED:
605 pr_err("can't set param, initialization failed\n");
606 ret = -ENODEV;
607 }
608 mutex_unlock(&zswap_init_lock);
609
610 /* no need to create a new pool, return directly */
611 if (!new_pool)
612 return ret;
613
614 if (!type) {
615 if (!zpool_has_pool(s)) {
616 pr_err("zpool %s not available\n", s);
617 return -ENOENT;
618 }
619 type = s;
620 } else if (!compressor) {
621 if (!crypto_has_acomp(s, 0, 0)) {
622 pr_err("compressor %s not available\n", s);
623 return -ENOENT;
624 }
625 compressor = s;
626 } else {
627 WARN_ON(1);
628 return -EINVAL;
629 }
630
631 spin_lock(&zswap_pools_lock);
632
633 pool = zswap_pool_find_get(type, compressor);
634 if (pool) {
635 zswap_pool_debug("using existing", pool);
636 WARN_ON(pool == zswap_pool_current());
637 list_del_rcu(&pool->list);
638 }
639
640 spin_unlock(&zswap_pools_lock);
641
642 if (!pool)
643 pool = zswap_pool_create(type, compressor);
644
645 if (pool)
646 ret = param_set_charp(s, kp);
647 else
648 ret = -EINVAL;
649
650 spin_lock(&zswap_pools_lock);
651
652 if (!ret) {
653 put_pool = zswap_pool_current();
654 list_add_rcu(&pool->list, &zswap_pools);
655 zswap_has_pool = true;
656 } else if (pool) {
657 /* add the possibly pre-existing pool to the end of the pools
658 * list; if it's new (and empty) then it'll be removed and
659 * destroyed by the put after we drop the lock
660 */
661 list_add_tail_rcu(&pool->list, &zswap_pools);
662 put_pool = pool;
663 }
664
665 spin_unlock(&zswap_pools_lock);
666
667 if (!zswap_has_pool && !pool) {
668 /* if initial pool creation failed, and this pool creation also
669 * failed, maybe both compressor and zpool params were bad.
670 * Allow changing this param, so pool creation will succeed
671 * when the other param is changed. We already verified this
672 * param is ok in the zpool_has_pool() or crypto_has_acomp()
673 * checks above.
674 */
675 ret = param_set_charp(s, kp);
676 }
677
678 /* drop the ref from either the old current pool,
679 * or the new pool we failed to add
680 */
681 if (put_pool)
682 zswap_pool_put(put_pool);
683
684 return ret;
685}
686
687static int zswap_compressor_param_set(const char *val,
688 const struct kernel_param *kp)
689{
690 return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
691}
692
693static int zswap_zpool_param_set(const char *val,
694 const struct kernel_param *kp)
695{
696 return __zswap_param_set(val, kp, NULL, zswap_compressor);
697}
698
699static int zswap_enabled_param_set(const char *val,
700 const struct kernel_param *kp)
701{
702 int ret = -ENODEV;
703
704 /* if this is load-time (pre-init) param setting, only set param. */
705 if (system_state != SYSTEM_RUNNING)
706 return param_set_bool(val, kp);
707
708 mutex_lock(&zswap_init_lock);
709 switch (zswap_init_state) {
710 case ZSWAP_UNINIT:
711 if (zswap_setup())
712 break;
713 fallthrough;
714 case ZSWAP_INIT_SUCCEED:
715 if (!zswap_has_pool)
716 pr_err("can't enable, no pool configured\n");
717 else
718 ret = param_set_bool(val, kp);
719 break;
720 case ZSWAP_INIT_FAILED:
721 pr_err("can't enable, initialization failed\n");
722 }
723 mutex_unlock(&zswap_init_lock);
724
725 return ret;
726}
727
506a86c5
JW
728/*********************************
729* lru functions
730**********************************/
731
a65b0e76
DC
732/* should be called under RCU */
733#ifdef CONFIG_MEMCG
734static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
735{
736 return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
737}
738#else
739static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
740{
741 return NULL;
742}
743#endif
744
745static inline int entry_to_nid(struct zswap_entry *entry)
746{
747 return page_to_nid(virt_to_page(entry));
748}
749
a65b0e76
DC
750static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
751{
b5ba474f
NP
752 atomic_long_t *nr_zswap_protected;
753 unsigned long lru_size, old, new;
a65b0e76
DC
754 int nid = entry_to_nid(entry);
755 struct mem_cgroup *memcg;
b5ba474f 756 struct lruvec *lruvec;
a65b0e76
DC
757
758 /*
759 * Note that it is safe to use rcu_read_lock() here, even in the face of
760 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
761 * used in list_lru lookup, only two scenarios are possible:
762 *
763 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
764 * new entry will be reparented to memcg's parent's list_lru.
765 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
766 * new entry will be added directly to memcg's parent's list_lru.
767 *
3f798aa6 768 * Similar reasoning holds for list_lru_del().
a65b0e76
DC
769 */
770 rcu_read_lock();
771 memcg = mem_cgroup_from_entry(entry);
772 /* will always succeed */
773 list_lru_add(list_lru, &entry->lru, nid, memcg);
b5ba474f
NP
774
775 /* Update the protection area */
776 lru_size = list_lru_count_one(list_lru, nid, memcg);
777 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
778 nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
779 old = atomic_long_inc_return(nr_zswap_protected);
780 /*
781 * Decay to avoid overflow and adapt to changing workloads.
782 * This is based on LRU reclaim cost decaying heuristics.
783 */
784 do {
785 new = old > lru_size / 4 ? old / 2 : old;
786 } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
a65b0e76
DC
787 rcu_read_unlock();
788}
789
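/*
 * Example of the decay above: with lru_size = 1000, nr_zswap_protected
 * grows by one per zswap_lru_add() until it exceeds lru_size / 4 = 250,
 * at which point it is halved (e.g. 251 -> 125). Under a steady stream
 * of stores it therefore oscillates between roughly lru_size / 8 and
 * lru_size / 4.
 */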
790static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
791{
792 int nid = entry_to_nid(entry);
793 struct mem_cgroup *memcg;
794
795 rcu_read_lock();
796 memcg = mem_cgroup_from_entry(entry);
797 /* will always succeed */
798 list_lru_del(list_lru, &entry->lru, nid, memcg);
799 rcu_read_unlock();
800}
801
5182661a
JW
802void zswap_lruvec_state_init(struct lruvec *lruvec)
803{
804 atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
805}
806
807void zswap_folio_swapin(struct folio *folio)
808{
809 struct lruvec *lruvec;
810
811 if (folio) {
812 lruvec = folio_lruvec(folio);
813 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
814 }
815}
816
817void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
818{
819 struct zswap_pool *pool;
820
821 /* lock out zswap pools list modification */
822 spin_lock(&zswap_pools_lock);
823 list_for_each_entry(pool, &zswap_pools, list) {
824 if (pool->next_shrink == memcg)
825 pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
826 }
827 spin_unlock(&zswap_pools_lock);
828}
829
2b281117
SJ
830/*********************************
831* rbtree functions
832**********************************/
833static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
834{
835 struct rb_node *node = root->rb_node;
836 struct zswap_entry *entry;
0bb48849 837 pgoff_t entry_offset;
2b281117
SJ
838
839 while (node) {
840 entry = rb_entry(node, struct zswap_entry, rbnode);
0bb48849
DC
841 entry_offset = swp_offset(entry->swpentry);
842 if (entry_offset > offset)
2b281117 843 node = node->rb_left;
0bb48849 844 else if (entry_offset < offset)
2b281117
SJ
845 node = node->rb_right;
846 else
847 return entry;
848 }
849 return NULL;
850}
851
852/*
 853 * In the case that an entry with the same offset is found, a pointer to
854 * the existing entry is stored in dupentry and the function returns -EEXIST
855 */
856static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
857 struct zswap_entry **dupentry)
858{
859 struct rb_node **link = &root->rb_node, *parent = NULL;
860 struct zswap_entry *myentry;
0bb48849 861 pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
2b281117
SJ
862
863 while (*link) {
864 parent = *link;
865 myentry = rb_entry(parent, struct zswap_entry, rbnode);
0bb48849
DC
866 myentry_offset = swp_offset(myentry->swpentry);
867 if (myentry_offset > entry_offset)
2b281117 868 link = &(*link)->rb_left;
0bb48849 869 else if (myentry_offset < entry_offset)
2b281117
SJ
870 link = &(*link)->rb_right;
871 else {
872 *dupentry = myentry;
873 return -EEXIST;
874 }
875 }
876 rb_link_node(&entry->rbnode, parent, link);
877 rb_insert_color(&entry->rbnode, root);
878 return 0;
879}
880
a230c20e 881static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
0ab0abcf 882{
a230c20e
CZ
883 rb_erase(&entry->rbnode, root);
884 RB_CLEAR_NODE(&entry->rbnode);
0ab0abcf
WY
885}
886
36034bf6
JW
887/*********************************
888* zswap entry functions
889**********************************/
890static struct kmem_cache *zswap_entry_cache;
891
892static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
893{
894 struct zswap_entry *entry;
895 entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
896 if (!entry)
897 return NULL;
36034bf6
JW
898 RB_CLEAR_NODE(&entry->rbnode);
899 return entry;
900}
901
902static void zswap_entry_cache_free(struct zswap_entry *entry)
903{
904 kmem_cache_free(zswap_entry_cache, entry);
905}
906
b8cf32dc
YA
907static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
908{
909 int i = 0;
910
911 if (ZSWAP_NR_ZPOOLS > 1)
912 i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
913
914 return entry->pool->zpools[i];
915}
916
0ab0abcf 917/*
12d79d64 918 * Carries out the common pattern of freeing an entry's zpool allocation,
0ab0abcf
WY
919 * freeing the entry itself, and decrementing the number of stored pages.
920 */
42398be2 921static void zswap_entry_free(struct zswap_entry *entry)
0ab0abcf 922{
a85f878b
SD
923 if (!entry->length)
924 atomic_dec(&zswap_same_filled_pages);
925 else {
a65b0e76 926 zswap_lru_del(&entry->pool->list_lru, entry);
b8cf32dc 927 zpool_free(zswap_find_zpool(entry), entry->handle);
b5ba474f 928 atomic_dec(&entry->pool->nr_stored);
a85f878b
SD
929 zswap_pool_put(entry->pool);
930 }
2e601e1e
JW
931 if (entry->objcg) {
932 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
933 obj_cgroup_put(entry->objcg);
934 }
0ab0abcf
WY
935 zswap_entry_cache_free(entry);
936 atomic_dec(&zswap_stored_pages);
f1c54846 937 zswap_update_total_size();
0ab0abcf
WY
938}
939
7dd1f7f0 940/*
a230c20e
CZ
 941 * The caller holds the tree lock and has looked the entry up in the tree,
 942 * so it must be on the tree; remove it from the tree and free it.
7dd1f7f0
JW
943 */
944static void zswap_invalidate_entry(struct zswap_tree *tree,
945 struct zswap_entry *entry)
946{
a230c20e
CZ
947 zswap_rb_erase(&tree->rbroot, entry);
948 zswap_entry_free(entry);
7dd1f7f0
JW
949}
950
f91e81d3
JW
951/*********************************
952* compressed storage functions
953**********************************/
64f200b8
JW
954static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
955{
956 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
957 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
958 struct crypto_acomp *acomp;
959 struct acomp_req *req;
960 int ret;
961
962 mutex_init(&acomp_ctx->mutex);
963
964 acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
965 if (!acomp_ctx->buffer)
966 return -ENOMEM;
967
968 acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
969 if (IS_ERR(acomp)) {
970 pr_err("could not alloc crypto acomp %s : %ld\n",
971 pool->tfm_name, PTR_ERR(acomp));
972 ret = PTR_ERR(acomp);
973 goto acomp_fail;
974 }
975 acomp_ctx->acomp = acomp;
976
977 req = acomp_request_alloc(acomp_ctx->acomp);
978 if (!req) {
979 pr_err("could not alloc crypto acomp_request %s\n",
980 pool->tfm_name);
981 ret = -ENOMEM;
982 goto req_fail;
983 }
984 acomp_ctx->req = req;
985
986 crypto_init_wait(&acomp_ctx->wait);
987 /*
988 * if the backend of acomp is async zip, crypto_req_done() will wakeup
989 * crypto_wait_req(); if the backend of acomp is scomp, the callback
 990 * won't be called and crypto_wait_req() will return without blocking.
991 */
992 acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
993 crypto_req_done, &acomp_ctx->wait);
994
995 return 0;
996
997req_fail:
998 crypto_free_acomp(acomp_ctx->acomp);
999acomp_fail:
1000 kfree(acomp_ctx->buffer);
1001 return ret;
1002}
1003
1004static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
1005{
1006 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
1007 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
1008
1009 if (!IS_ERR_OR_NULL(acomp_ctx)) {
1010 if (!IS_ERR_OR_NULL(acomp_ctx->req))
1011 acomp_request_free(acomp_ctx->req);
1012 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
1013 crypto_free_acomp(acomp_ctx->acomp);
1014 kfree(acomp_ctx->buffer);
1015 }
1016
1017 return 0;
1018}
1019
f91e81d3
JW
1020static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
1021{
1022 struct crypto_acomp_ctx *acomp_ctx;
1023 struct scatterlist input, output;
55e78c93 1024 int comp_ret = 0, alloc_ret = 0;
f91e81d3
JW
1025 unsigned int dlen = PAGE_SIZE;
1026 unsigned long handle;
1027 struct zpool *zpool;
1028 char *buf;
1029 gfp_t gfp;
f91e81d3
JW
1030 u8 *dst;
1031
1032 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1033
1034 mutex_lock(&acomp_ctx->mutex);
1035
1036 dst = acomp_ctx->buffer;
1037 sg_init_table(&input, 1);
1038 sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1039
1040 /*
 1041 * We need PAGE_SIZE * 2 here since there may be an over-compression case,
 1042 * and hardware accelerators might not check the dst buffer size, so
 1043 * give the dst buffer enough length to avoid buffer overflow.
1044 */
1045 sg_init_one(&output, dst, PAGE_SIZE * 2);
1046 acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1047
1048 /*
 1049 * It may look a little silly that we send an asynchronous request and
 1050 * then wait for its completion synchronously; this makes the process
 1051 * effectively synchronous.
 1052 * Theoretically, acomp lets users submit multiple requests to one
 1053 * acomp instance and have them completed simultaneously. But in this
 1054 * case zswap stores and loads page by page; there is no existing way
 1055 * to send the second page before the first one is done in a single
 1056 * thread doing zswap.
 1057 * However, different threads running on different CPUs use different
 1058 * acomp instances, so multiple threads can do (de)compression in parallel.
1059 */
55e78c93 1060 comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
f91e81d3 1061 dlen = acomp_ctx->req->dlen;
55e78c93 1062 if (comp_ret)
f91e81d3 1063 goto unlock;
f91e81d3
JW
1064
1065 zpool = zswap_find_zpool(entry);
1066 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1067 if (zpool_malloc_support_movable(zpool))
1068 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
55e78c93
BS
1069 alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
1070 if (alloc_ret)
f91e81d3 1071 goto unlock;
f91e81d3
JW
1072
1073 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1074 memcpy(buf, dst, dlen);
1075 zpool_unmap_handle(zpool, handle);
1076
1077 entry->handle = handle;
1078 entry->length = dlen;
1079
1080unlock:
55e78c93
BS
1081 if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
1082 zswap_reject_compress_poor++;
1083 else if (comp_ret)
1084 zswap_reject_compress_fail++;
1085 else if (alloc_ret)
1086 zswap_reject_alloc_fail++;
1087
f91e81d3 1088 mutex_unlock(&acomp_ctx->mutex);
55e78c93 1089 return comp_ret == 0 && alloc_ret == 0;
f91e81d3
JW
1090}
1091
1092static void zswap_decompress(struct zswap_entry *entry, struct page *page)
1093{
1094 struct zpool *zpool = zswap_find_zpool(entry);
1095 struct scatterlist input, output;
1096 struct crypto_acomp_ctx *acomp_ctx;
1097 u8 *src;
1098
1099 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1100 mutex_lock(&acomp_ctx->mutex);
1101
1102 src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1103 if (!zpool_can_sleep_mapped(zpool)) {
1104 memcpy(acomp_ctx->buffer, src, entry->length);
1105 src = acomp_ctx->buffer;
1106 zpool_unmap_handle(zpool, entry->handle);
1107 }
1108
1109 sg_init_one(&input, src, entry->length);
1110 sg_init_table(&output, 1);
1111 sg_set_page(&output, page, PAGE_SIZE, 0);
1112 acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1113 BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1114 BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1115 mutex_unlock(&acomp_ctx->mutex);
1116
1117 if (zpool_can_sleep_mapped(zpool))
1118 zpool_unmap_handle(zpool, entry->handle);
1119}
1120
9986d35d
JW
1121/*********************************
1122* writeback code
1123**********************************/
1124/*
1125 * Attempts to free an entry by adding a folio to the swap cache,
1126 * decompressing the entry data into the folio, and issuing a
1127 * bio write to write the folio back to the swap device.
1128 *
1129 * This can be thought of as a "resumed writeback" of the folio
1130 * to the swap device. We are basically resuming the same swap
 1131 * writeback path that was intercepted by zswap_store()
1132 * in the first place. After the folio has been decompressed into
1133 * the swap cache, the compressed version stored by zswap can be
1134 * freed.
1135 */
1136static int zswap_writeback_entry(struct zswap_entry *entry,
1137 swp_entry_t swpentry)
1138{
1139 struct zswap_tree *tree;
1140 struct folio *folio;
1141 struct mempolicy *mpol;
1142 bool folio_was_allocated;
1143 struct writeback_control wbc = {
1144 .sync_mode = WB_SYNC_NONE,
1145 };
1146
1147 /* try to allocate swap cache folio */
1148 mpol = get_task_policy(current);
1149 folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1150 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1151 if (!folio)
1152 return -ENOMEM;
1153
1154 /*
1155 * Found an existing folio, we raced with swapin or concurrent
1156 * shrinker. We generally writeback cold folios from zswap, and
1157 * swapin means the folio just became hot, so skip this folio.
1158 * For unlikely concurrent shrinker case, it will be unlinked
1159 * and freed when invalidated by the concurrent shrinker anyway.
1160 */
1161 if (!folio_was_allocated) {
1162 folio_put(folio);
1163 return -EEXIST;
1164 }
1165
1166 /*
1167 * folio is locked, and the swapcache is now secured against
f9c0f1c3
CZ
1168 * concurrent swapping to and from the slot, and concurrent
1169 * swapoff so we can safely dereference the zswap tree here.
1170 * Verify that the swap entry hasn't been invalidated and recycled
1171 * behind our backs, to avoid overwriting a new swap folio with
1172 * old compressed data. Only when this is successful can the entry
1173 * be dereferenced.
9986d35d
JW
1174 */
1175 tree = swap_zswap_tree(swpentry);
1176 spin_lock(&tree->lock);
1177 if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
1178 spin_unlock(&tree->lock);
1179 delete_from_swap_cache(folio);
1180 folio_unlock(folio);
1181 folio_put(folio);
1182 return -ENOMEM;
1183 }
1184
1185 /* Safe to deref entry after the entry is verified above. */
a230c20e 1186 zswap_rb_erase(&tree->rbroot, entry);
9986d35d
JW
1187 spin_unlock(&tree->lock);
1188
1189 zswap_decompress(entry, &folio->page);
1190
1191 count_vm_event(ZSWPWB);
1192 if (entry->objcg)
1193 count_objcg_event(entry->objcg, ZSWPWB);
1194
a230c20e 1195 zswap_entry_free(entry);
9986d35d
JW
1196
1197 /* folio is up to date */
1198 folio_mark_uptodate(folio);
1199
1200 /* move it to the tail of the inactive list after end_writeback */
1201 folio_set_reclaim(folio);
1202
1203 /* start writeback */
1204 __swap_writepage(folio, &wbc);
1205 folio_put(folio);
1206
1207 return 0;
1208}
1209
b5ba474f
NP
1210/*********************************
1211* shrinker functions
1212**********************************/
1213static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
eb23ee4f
JW
1214 spinlock_t *lock, void *arg)
1215{
1216 struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1217 bool *encountered_page_in_swapcache = (bool *)arg;
1218 swp_entry_t swpentry;
1219 enum lru_status ret = LRU_REMOVED_RETRY;
1220 int writeback_result;
1221
1222 /*
f9c0f1c3
CZ
1223 * As soon as we drop the LRU lock, the entry can be freed by
1224 * a concurrent invalidation. This means the following:
eb23ee4f 1225 *
f9c0f1c3
CZ
1226 * 1. We extract the swp_entry_t to the stack, allowing
1227 * zswap_writeback_entry() to pin the swap entry and
 1228 * then validate the zswap entry against that swap entry's
1229 * tree using pointer value comparison. Only when that
1230 * is successful can the entry be dereferenced.
eb23ee4f 1231 *
f9c0f1c3
CZ
1232 * 2. Usually, objects are taken off the LRU for reclaim. In
1233 * this case this isn't possible, because if reclaim fails
1234 * for whatever reason, we have no means of knowing if the
1235 * entry is alive to put it back on the LRU.
eb23ee4f 1236 *
f9c0f1c3
CZ
1237 * So rotate it before dropping the lock. If the entry is
1238 * written back or invalidated, the free path will unlink
1239 * it. For failures, rotation is the right thing as well.
1240 *
1241 * Temporary failures, where the same entry should be tried
1242 * again immediately, almost never happen for this shrinker.
1243 * We don't do any trylocking; -ENOMEM comes closest,
1244 * but that's extremely rare and doesn't happen spuriously
1245 * either. Don't bother distinguishing this case.
eb23ee4f
JW
1246 */
1247 list_move_tail(item, &l->list);
1248
1249 /*
1250 * Once the lru lock is dropped, the entry might get freed. The
1251 * swpentry is copied to the stack, and entry isn't deref'd again
1252 * until the entry is verified to still be alive in the tree.
1253 */
1254 swpentry = entry->swpentry;
1255
1256 /*
1257 * It's safe to drop the lock here because we return either
1258 * LRU_REMOVED_RETRY or LRU_RETRY.
1259 */
1260 spin_unlock(lock);
1261
1262 writeback_result = zswap_writeback_entry(entry, swpentry);
1263
1264 if (writeback_result) {
1265 zswap_reject_reclaim_fail++;
1266 ret = LRU_RETRY;
1267
1268 /*
1269 * Encountering a page already in swap cache is a sign that we are shrinking
1270 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1271 * shrinker context).
1272 */
b49547ad
CZ
1273 if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1274 ret = LRU_STOP;
eb23ee4f 1275 *encountered_page_in_swapcache = true;
b49547ad 1276 }
eb23ee4f
JW
1277 } else {
1278 zswap_written_back_pages++;
1279 }
1280
1281 spin_lock(lock);
1282 return ret;
1283}
b5ba474f
NP
1284
1285static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1286 struct shrink_control *sc)
1287{
1288 struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1289 unsigned long shrink_ret, nr_protected, lru_size;
1290 struct zswap_pool *pool = shrinker->private_data;
1291 bool encountered_page_in_swapcache = false;
1292
501a06fe
NP
1293 if (!zswap_shrinker_enabled ||
1294 !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
b5ba474f
NP
1295 sc->nr_scanned = 0;
1296 return SHRINK_STOP;
1297 }
1298
1299 nr_protected =
1300 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1301 lru_size = list_lru_shrink_count(&pool->list_lru, sc);
1302
1303 /*
1304 * Abort if we are shrinking into the protected region.
1305 *
 1306 * This short-circuiting is necessary because if we have too many
1307 * concurrent reclaimers getting the freeable zswap object counts at the
1308 * same time (before any of them made reasonable progress), the total
1309 * number of reclaimed objects might be more than the number of unprotected
1310 * objects (i.e the reclaimers will reclaim into the protected area of the
1311 * zswap LRU).
1312 */
1313 if (nr_protected >= lru_size - sc->nr_to_scan) {
1314 sc->nr_scanned = 0;
1315 return SHRINK_STOP;
1316 }
1317
1318 shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
1319 &encountered_page_in_swapcache);
1320
1321 if (encountered_page_in_swapcache)
1322 return SHRINK_STOP;
1323
1324 return shrink_ret ? shrink_ret : SHRINK_STOP;
1325}
1326
1327static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1328 struct shrink_control *sc)
1329{
1330 struct zswap_pool *pool = shrinker->private_data;
1331 struct mem_cgroup *memcg = sc->memcg;
1332 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1333 unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1334
501a06fe 1335 if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
b5ba474f
NP
1336 return 0;
1337
1338#ifdef CONFIG_MEMCG_KMEM
7d7ef0a4 1339 mem_cgroup_flush_stats(memcg);
b5ba474f
NP
1340 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1341 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1342#else
1343 /* use pool stats instead of memcg stats */
1344 nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
1345 nr_stored = atomic_read(&pool->nr_stored);
1346#endif
1347
1348 if (!nr_stored)
1349 return 0;
1350
1351 nr_protected =
1352 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1353 nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
1354 /*
1355 * Subtract the lru size by an estimate of the number of pages
1356 * that should be protected.
1357 */
1358 nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1359
1360 /*
1361 * Scale the number of freeable pages by the memory saving factor.
1362 * This ensures that the better zswap compresses memory, the fewer
1363 * pages we will evict to swap (as it will otherwise incur IO for
1364 * relatively small memory saving).
1365 */
1366 return mult_frac(nr_freeable, nr_backing, nr_stored);
1367}
1368
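/*
 * Example of the scaling above: with nr_freeable = 3000 unprotected
 * objects and nr_stored = 3000 compressed pages backed by nr_backing =
 * 1000 pages of pool memory (a 3:1 compression ratio),
 * mult_frac(3000, 1000, 3000) = 1000 -- evicting the entire LRU would
 * only free about 1000 pages, so that is what gets reported.
 */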
1369static void zswap_alloc_shrinker(struct zswap_pool *pool)
1370{
1371 pool->shrinker =
1372 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1373 if (!pool->shrinker)
1374 return;
1375
1376 pool->shrinker->private_data = pool;
1377 pool->shrinker->scan_objects = zswap_shrinker_scan;
1378 pool->shrinker->count_objects = zswap_shrinker_count;
1379 pool->shrinker->batch = 0;
1380 pool->shrinker->seeks = DEFAULT_SEEKS;
1381}
1382
a65b0e76
DC
1383static int shrink_memcg(struct mem_cgroup *memcg)
1384{
1385 struct zswap_pool *pool;
1386 int nid, shrunk = 0;
1387
501a06fe
NP
1388 if (!mem_cgroup_zswap_writeback_enabled(memcg))
1389 return -EINVAL;
1390
a65b0e76
DC
1391 /*
1392 * Skip zombies because their LRUs are reparented and we would be
1393 * reclaiming from the parent instead of the dead memcg.
1394 */
1395 if (memcg && !mem_cgroup_online(memcg))
1396 return -ENOENT;
1397
1398 pool = zswap_pool_current_get();
1399 if (!pool)
1400 return -EINVAL;
1401
1402 for_each_node_state(nid, N_NORMAL_MEMORY) {
1403 unsigned long nr_to_walk = 1;
1404
1405 shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
1406 &shrink_memcg_cb, NULL, &nr_to_walk);
1407 }
1408 zswap_pool_put(pool);
1409 return shrunk ? 0 : -EAGAIN;
f999f38b
DC
1410}
1411
45190f01
VW
1412static void shrink_worker(struct work_struct *w)
1413{
1414 struct zswap_pool *pool = container_of(w, typeof(*pool),
1415 shrink_work);
a65b0e76 1416 struct mem_cgroup *memcg;
e0228d59
DC
1417 int ret, failures = 0;
1418
a65b0e76 1419 /* global reclaim will select cgroup in a round-robin fashion. */
e0228d59 1420 do {
a65b0e76
DC
1421 spin_lock(&zswap_pools_lock);
1422 pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
1423 memcg = pool->next_shrink;
1424
1425 /*
1426 * We need to retry if we have gone through a full round trip, or if we
1427 * got an offline memcg (or else we risk undoing the effect of the
1428 * zswap memcg offlining cleanup callback). This is not catastrophic
1429 * per se, but it will keep the now offlined memcg hostage for a while.
1430 *
1431 * Note that if we got an online memcg, we will keep the extra
1432 * reference in case the original reference obtained by mem_cgroup_iter
1433 * is dropped by the zswap memcg offlining callback, ensuring that the
1434 * memcg is not killed when we are reclaiming.
1435 */
1436 if (!memcg) {
1437 spin_unlock(&zswap_pools_lock);
1438 if (++failures == MAX_RECLAIM_RETRIES)
e0228d59 1439 break;
a65b0e76
DC
1440
1441 goto resched;
1442 }
1443
1444 if (!mem_cgroup_tryget_online(memcg)) {
1445 /* drop the reference from mem_cgroup_iter() */
1446 mem_cgroup_iter_break(NULL, memcg);
1447 pool->next_shrink = NULL;
1448 spin_unlock(&zswap_pools_lock);
1449
e0228d59
DC
1450 if (++failures == MAX_RECLAIM_RETRIES)
1451 break;
a65b0e76
DC
1452
1453 goto resched;
e0228d59 1454 }
a65b0e76
DC
1455 spin_unlock(&zswap_pools_lock);
1456
1457 ret = shrink_memcg(memcg);
1458 /* drop the extra reference */
1459 mem_cgroup_put(memcg);
1460
1461 if (ret == -EINVAL)
1462 break;
1463 if (ret && ++failures == MAX_RECLAIM_RETRIES)
1464 break;
1465
1466resched:
e0228d59
DC
1467 cond_resched();
1468 } while (!zswap_can_accept());
45190f01
VW
1469 zswap_pool_put(pool);
1470}
1471
a85f878b
SD
1472static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1473{
a85f878b 1474 unsigned long *page;
62bf1258
TS
1475 unsigned long val;
1476 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
a85f878b
SD
1477
1478 page = (unsigned long *)ptr;
62bf1258
TS
1479 val = page[0];
1480
1481 if (val != page[last_pos])
1482 return 0;
1483
1484 for (pos = 1; pos < last_pos; pos++) {
1485 if (val != page[pos])
a85f878b
SD
1486 return 0;
1487 }
62bf1258
TS
1488
1489 *value = val;
1490
a85f878b
SD
1491 return 1;
1492}
1493
1494static void zswap_fill_page(void *ptr, unsigned long value)
1495{
1496 unsigned long *page;
1497
1498 page = (unsigned long *)ptr;
1499 memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1500}
1501
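/*
 * Example: on a 64-bit kernel, a page consisting entirely of the
 * repeated word 0x0123456789abcdef passes zswap_is_page_same_filled()
 * and is stored as a zero-length entry with
 * entry->value = 0x0123456789abcdef and no zpool allocation;
 * zswap_fill_page() reconstructs it on load with memset_l(). A page of
 * all zeroes is simply the value == 0 case.
 */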
34f4c198 1502bool zswap_store(struct folio *folio)
2b281117 1503{
3d2c9087 1504 swp_entry_t swp = folio->swap;
42c06a0e 1505 pgoff_t offset = swp_offset(swp);
44c7c734 1506 struct zswap_tree *tree = swap_zswap_tree(swp);
2b281117 1507 struct zswap_entry *entry, *dupentry;
f4840ccf 1508 struct obj_cgroup *objcg = NULL;
a65b0e76 1509 struct mem_cgroup *memcg = NULL;
be7fc97c 1510 struct zswap_pool *shrink_pool;
42c06a0e 1511
34f4c198
MWO
1512 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1513 VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
2b281117 1514
34f4c198
MWO
1515 /* Large folios aren't supported */
1516 if (folio_test_large(folio))
42c06a0e 1517 return false;
7ba71669 1518
678e54d4 1519 if (!zswap_enabled)
f576a1e8 1520 goto check_old;
678e54d4 1521
074e3e26 1522 objcg = get_obj_cgroup_from_folio(folio);
a65b0e76
DC
1523 if (objcg && !obj_cgroup_may_zswap(objcg)) {
1524 memcg = get_mem_cgroup_from_objcg(objcg);
1525 if (shrink_memcg(memcg)) {
1526 mem_cgroup_put(memcg);
1527 goto reject;
1528 }
1529 mem_cgroup_put(memcg);
1530 }
f4840ccf 1531
2b281117
SJ
1532 /* reclaim space if needed */
1533 if (zswap_is_full()) {
1534 zswap_pool_limit_hit++;
45190f01 1535 zswap_pool_reached_full = true;
f4840ccf 1536 goto shrink;
45190f01 1537 }
16e536ef 1538
45190f01 1539 if (zswap_pool_reached_full) {
42c06a0e 1540 if (!zswap_can_accept())
e0228d59 1541 goto shrink;
42c06a0e 1542 else
45190f01 1543 zswap_pool_reached_full = false;
2b281117
SJ
1544 }
1545
1546 /* allocate entry */
be7fc97c 1547 entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
2b281117
SJ
1548 if (!entry) {
1549 zswap_reject_kmemcache_fail++;
2b281117
SJ
1550 goto reject;
1551 }
1552
a85f878b 1553 if (zswap_same_filled_pages_enabled) {
be7fc97c
JW
1554 unsigned long value;
1555 u8 *src;
1556
1557 src = kmap_local_folio(folio, 0);
a85f878b 1558 if (zswap_is_page_same_filled(src, &value)) {
003ae2fb 1559 kunmap_local(src);
a85f878b
SD
1560 entry->length = 0;
1561 entry->value = value;
1562 atomic_inc(&zswap_same_filled_pages);
1563 goto insert_entry;
1564 }
003ae2fb 1565 kunmap_local(src);
a85f878b
SD
1566 }
1567
42c06a0e 1568 if (!zswap_non_same_filled_pages_enabled)
cb325ddd 1569 goto freepage;
cb325ddd 1570
f1c54846
DS
1571 /* if entry is successfully added, it keeps the reference */
1572 entry->pool = zswap_pool_current_get();
42c06a0e 1573 if (!entry->pool)
f1c54846 1574 goto freepage;
f1c54846 1575
a65b0e76
DC
1576 if (objcg) {
1577 memcg = get_mem_cgroup_from_objcg(objcg);
1578 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1579 mem_cgroup_put(memcg);
1580 goto put_pool;
1581 }
1582 mem_cgroup_put(memcg);
1583 }
1584
fa9ad6e2
JW
1585 if (!zswap_compress(folio, entry))
1586 goto put_pool;
1ec3b5fe 1587
a85f878b 1588insert_entry:
be7fc97c 1589 entry->swpentry = swp;
f4840ccf
JW
1590 entry->objcg = objcg;
1591 if (objcg) {
1592 obj_cgroup_charge_zswap(objcg, entry->length);
1593 /* Account before objcg ref is moved to tree */
1594 count_objcg_event(objcg, ZSWPOUT);
1595 }
1596
2b281117
SJ
1597 /* map */
1598 spin_lock(&tree->lock);
ca56489c 1599 /*
f576a1e8
CZ
 1600 * The folio may have been dirtied again; invalidate the
1601 * possibly stale entry before inserting the new entry.
ca56489c 1602 */
f576a1e8 1603 if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
56c67049 1604 zswap_invalidate_entry(tree, dupentry);
f576a1e8 1605 WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
42c06a0e 1606 }
35499e2b 1607 if (entry->length) {
a65b0e76
DC
1608 INIT_LIST_HEAD(&entry->lru);
1609 zswap_lru_add(&entry->pool->list_lru, entry);
b5ba474f 1610 atomic_inc(&entry->pool->nr_stored);
f999f38b 1611 }
2b281117
SJ
1612 spin_unlock(&tree->lock);
1613
1614 /* update stats */
1615 atomic_inc(&zswap_stored_pages);
f1c54846 1616 zswap_update_total_size();
f6498b77 1617 count_vm_event(ZSWPOUT);
2b281117 1618
42c06a0e 1619 return true;
2b281117 1620
a65b0e76 1621put_pool:
f1c54846
DS
1622 zswap_pool_put(entry->pool);
1623freepage:
2b281117
SJ
1624 zswap_entry_cache_free(entry);
1625reject:
f4840ccf
JW
1626 if (objcg)
1627 obj_cgroup_put(objcg);
f576a1e8
CZ
1628check_old:
1629 /*
1630 * If the zswap store fails or zswap is disabled, we must invalidate the
1631 * possibly stale entry which was previously stored at this offset.
1632 * Otherwise, writeback could overwrite the new data in the swapfile.
1633 */
1634 spin_lock(&tree->lock);
1635 entry = zswap_rb_search(&tree->rbroot, offset);
1636 if (entry)
1637 zswap_invalidate_entry(tree, entry);
1638 spin_unlock(&tree->lock);
42c06a0e 1639 return false;
f4840ccf
JW
1640
1641shrink:
be7fc97c
JW
1642 shrink_pool = zswap_pool_last_get();
1643 if (shrink_pool && !queue_work(shrink_wq, &shrink_pool->shrink_work))
1644 zswap_pool_put(shrink_pool);
f4840ccf 1645 goto reject;
2b281117
SJ
1646}
1647
ca54f6d8 1648bool zswap_load(struct folio *folio)
2b281117 1649{
3d2c9087 1650 swp_entry_t swp = folio->swap;
42c06a0e 1651 pgoff_t offset = swp_offset(swp);
ca54f6d8 1652 struct page *page = &folio->page;
44c7c734 1653 struct zswap_tree *tree = swap_zswap_tree(swp);
2b281117 1654 struct zswap_entry *entry;
32acba4c 1655 u8 *dst;
42c06a0e 1656
ca54f6d8 1657 VM_WARN_ON_ONCE(!folio_test_locked(folio));
2b281117 1658
2b281117 1659 spin_lock(&tree->lock);
5b297f70 1660 entry = zswap_rb_search(&tree->rbroot, offset);
2b281117 1661 if (!entry) {
2b281117 1662 spin_unlock(&tree->lock);
42c06a0e 1663 return false;
2b281117 1664 }
a230c20e 1665 zswap_rb_erase(&tree->rbroot, entry);
2b281117
SJ
1666 spin_unlock(&tree->lock);
1667
66447fd0 1668 if (entry->length)
ff2972aa 1669 zswap_decompress(entry, page);
66447fd0 1670 else {
003ae2fb 1671 dst = kmap_local_page(page);
a85f878b 1672 zswap_fill_page(dst, entry->value);
003ae2fb 1673 kunmap_local(dst);
a85f878b
SD
1674 }
1675
f6498b77 1676 count_vm_event(ZSWPIN);
f4840ccf
JW
1677 if (entry->objcg)
1678 count_objcg_event(entry->objcg, ZSWPIN);
c75f5c1e 1679
a230c20e 1680 zswap_entry_free(entry);
2b281117 1681
c2e2ba77
CZ
1682 folio_mark_dirty(folio);
1683
66447fd0 1684 return true;
2b281117
SJ
1685}
1686
0827a1fb 1687void zswap_invalidate(swp_entry_t swp)
2b281117 1688{
0827a1fb
CZ
1689 pgoff_t offset = swp_offset(swp);
1690 struct zswap_tree *tree = swap_zswap_tree(swp);
2b281117 1691 struct zswap_entry *entry;
2b281117 1692
2b281117
SJ
1693 spin_lock(&tree->lock);
1694 entry = zswap_rb_search(&tree->rbroot, offset);
06ed2289
JW
1695 if (entry)
1696 zswap_invalidate_entry(tree, entry);
2b281117 1697 spin_unlock(&tree->lock);
2b281117
SJ
1698}
1699
44c7c734 1700int zswap_swapon(int type, unsigned long nr_pages)
42c06a0e 1701{
44c7c734
CZ
1702 struct zswap_tree *trees, *tree;
1703 unsigned int nr, i;
42c06a0e 1704
44c7c734
CZ
1705 nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1706 trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1707 if (!trees) {
42c06a0e 1708 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
bb29fd77 1709 return -ENOMEM;
42c06a0e
JW
1710 }
1711
44c7c734
CZ
1712 for (i = 0; i < nr; i++) {
1713 tree = trees + i;
1714 tree->rbroot = RB_ROOT;
1715 spin_lock_init(&tree->lock);
1716 }
1717
1718 nr_zswap_trees[type] = nr;
1719 zswap_trees[type] = trees;
bb29fd77 1720 return 0;
42c06a0e
JW
1721}
1722
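/*
 * Example, assuming 4 KiB pages: SWAP_ADDRESS_SPACE_PAGES is
 * 1 << SWAP_ADDRESS_SPACE_SHIFT = 16384, so an 8 GiB swap device
 * (2097152 slots) gets DIV_ROUND_UP(2097152, 16384) = 128 zswap_trees,
 * each with its own rbtree and lock to reduce contention.
 */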
1723void zswap_swapoff(int type)
2b281117 1724{
44c7c734
CZ
1725 struct zswap_tree *trees = zswap_trees[type];
1726 unsigned int i;
2b281117 1727
44c7c734 1728 if (!trees)
2b281117
SJ
1729 return;
1730
83e68f25
YA
1731 /* try_to_unuse() invalidated all the entries already */
1732 for (i = 0; i < nr_zswap_trees[type]; i++)
1733 WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
44c7c734
CZ
1734
1735 kvfree(trees);
1736 nr_zswap_trees[type] = 0;
aa9bca05 1737 zswap_trees[type] = NULL;
2b281117
SJ
1738}
1739
2b281117
SJ
1740/*********************************
1741* debugfs functions
1742**********************************/
1743#ifdef CONFIG_DEBUG_FS
1744#include <linux/debugfs.h>
1745
1746static struct dentry *zswap_debugfs_root;
1747
141fdeec 1748static int zswap_debugfs_init(void)
2b281117
SJ
1749{
1750 if (!debugfs_initialized())
1751 return -ENODEV;
1752
1753 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
2b281117 1754
0825a6f9
JP
1755 debugfs_create_u64("pool_limit_hit", 0444,
1756 zswap_debugfs_root, &zswap_pool_limit_hit);
1757 debugfs_create_u64("reject_reclaim_fail", 0444,
1758 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1759 debugfs_create_u64("reject_alloc_fail", 0444,
1760 zswap_debugfs_root, &zswap_reject_alloc_fail);
1761 debugfs_create_u64("reject_kmemcache_fail", 0444,
1762 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
cb61dad8
NP
1763 debugfs_create_u64("reject_compress_fail", 0444,
1764 zswap_debugfs_root, &zswap_reject_compress_fail);
0825a6f9
JP
1765 debugfs_create_u64("reject_compress_poor", 0444,
1766 zswap_debugfs_root, &zswap_reject_compress_poor);
1767 debugfs_create_u64("written_back_pages", 0444,
1768 zswap_debugfs_root, &zswap_written_back_pages);
0825a6f9
JP
1769 debugfs_create_u64("pool_total_size", 0444,
1770 zswap_debugfs_root, &zswap_pool_total_size);
1771 debugfs_create_atomic_t("stored_pages", 0444,
1772 zswap_debugfs_root, &zswap_stored_pages);
a85f878b 1773 debugfs_create_atomic_t("same_filled_pages", 0444,
0825a6f9 1774 zswap_debugfs_root, &zswap_same_filled_pages);
2b281117
SJ
1775
1776 return 0;
1777}
2b281117 1778#else
141fdeec 1779static int zswap_debugfs_init(void)
2b281117
SJ
1780{
1781 return 0;
1782}
2b281117
SJ
1783#endif
1784
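/*
 * Example of reading the statistics above at runtime (assuming debugfs
 * is mounted at the usual /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/zswap/pool_total_size
 *   grep . /sys/kernel/debug/zswap/*
 */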
1785/*********************************
1786* module init and exit
1787**********************************/
141fdeec 1788static int zswap_setup(void)
2b281117 1789{
f1c54846 1790 struct zswap_pool *pool;
ad7ed770 1791 int ret;
60105e12 1792
b7919122
LS
1793 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1794 if (!zswap_entry_cache) {
2b281117 1795 pr_err("entry cache creation failed\n");
f1c54846 1796 goto cache_fail;
2b281117 1797 }
f1c54846 1798
cab7a7e5
SAS
1799 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1800 "mm/zswap_pool:prepare",
1801 zswap_cpu_comp_prepare,
1802 zswap_cpu_comp_dead);
1803 if (ret)
1804 goto hp_fail;
1805
f1c54846 1806 pool = __zswap_pool_create_fallback();
ae3d89a7
DS
1807 if (pool) {
1808 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
b8cf32dc 1809 zpool_get_type(pool->zpools[0]));
ae3d89a7
DS
1810 list_add(&pool->list, &zswap_pools);
1811 zswap_has_pool = true;
1812 } else {
f1c54846 1813 pr_err("pool creation failed\n");
ae3d89a7 1814 zswap_enabled = false;
2b281117 1815 }
60105e12 1816
8409a385
RM
1817 shrink_wq = alloc_workqueue("zswap-shrink",
1818 WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
45190f01
VW
1819 if (!shrink_wq)
1820 goto fallback_fail;
1821
2b281117
SJ
1822 if (zswap_debugfs_init())
1823 pr_warn("debugfs initialization failed\n");
9021ccec 1824 zswap_init_state = ZSWAP_INIT_SUCCEED;
2b281117 1825 return 0;
f1c54846 1826
45190f01 1827fallback_fail:
38aeb071
DC
1828 if (pool)
1829 zswap_pool_destroy(pool);
cab7a7e5 1830hp_fail:
b7919122 1831 kmem_cache_destroy(zswap_entry_cache);
f1c54846 1832cache_fail:
d7b028f5 1833 /* if built-in, we aren't unloaded on failure; don't allow use */
9021ccec 1834 zswap_init_state = ZSWAP_INIT_FAILED;
d7b028f5 1835 zswap_enabled = false;
2b281117
SJ
1836 return -ENOMEM;
1837}
141fdeec
LS
1838
1839static int __init zswap_init(void)
1840{
1841 if (!zswap_enabled)
1842 return 0;
1843 return zswap_setup();
1844}
2b281117 1845/* must be late so crypto has time to come up */
141fdeec 1846late_initcall(zswap_init);
2b281117 1847
68386da8 1848MODULE_AUTHOR("Seth Jennings <[email protected]>");
2b281117 1849MODULE_DESCRIPTION("Compressed cache for swap pages");