// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"
/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""
static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);
/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
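/*
 * Note: every module_param above is also writable at runtime through
 * /sys/module/zswap/parameters/ (e.g. the "enabled", "compressor" and
 * "zpool" knobs), which is why the param setters below revalidate
 * their input against the current initialization state.
 */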
bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}
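/*
 * zswap_never_enabled() is a static-branch fast path: on kernels where
 * zswap has never been switched on, swapin can skip the zswap tree
 * lookup in zswap_load() entirely.
 */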
/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};
/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;
/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression. For a same value filled page length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};
static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);
enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;
/*********************************
* helpers and fwd declarations
**********************************/
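/*
 * Each swap device is covered by an array of xarrays, one per
 * SWAP_ADDRESS_SPACE_PAGES slots (1 << SWAP_ADDRESS_SPACE_SHIFT, i.e.
 * 16384 slots, or 64MB of swap space assuming 4K pages). This mirrors
 * the swap cache sharding and spreads lock contention across trees.
 */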
static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))
/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}
static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}
static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}
static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}
static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}
static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}
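/*
 * Worked example with the defaults on a 16GiB machine (4194304 pages):
 * zswap_max_pages() = 4194304 * 20 / 100 = 838860 pages of compressed
 * pool, and once that limit is hit, stores resume only below the
 * acceptance threshold of 838860 * 90 / 100 = 754974 pages.
 */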
unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}
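/*
 * zswap_check_limits() implements hysteresis around the pool limit:
 * once usage reaches zswap_max_pages(), stores keep being rejected
 * until usage falls back below zswap_accept_thr_pages(), which avoids
 * flapping right at the boundary.
 */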
static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}
/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}
/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to the active state.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}
static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}
/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}
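/*
 * With the decay above, nr_zswap_protected saturates near lru_size / 4:
 * any increment that overshoots that mark halves the counter, so roughly
 * an eighth to a quarter of the LRU stays protected, and the estimate
 * adapts within a few increments when the workload changes.
 */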
static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg)
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
	spin_unlock(&zswap_shrink_lock);
}
/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&zswap_list_lru, entry);
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}
/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}
static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}
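/*
 * Store-side data path: map the folio into a scatterlist, run the
 * (possibly asynchronous) acomp request into the per-CPU bounce buffer,
 * then copy the result into a freshly allocated zpool handle. Rejections
 * are counted per cause for debugfs.
 */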
static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_folio(&input, folio, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be cases of
	 * over-compression, and hardware accelerators may not check the dst
	 * buffer size, so give the dst buffer enough length to avoid overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly to send an asynchronous request and then
	 * wait for its completion synchronously, but that is the model: zswap
	 * stores and loads page by page, so a thread has no second request to
	 * issue before the first one completes. Different threads running on
	 * different CPUs use different acomp instances, however, so multiple
	 * threads can still do (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = entry->pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}
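/*
 * Load-side mirror image of zswap_compress(): map the zpool handle,
 * bounce through the per-CPU buffer when the mapping is atomic or not
 * linearly addressable, and decompress straight into the target folio.
 */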
static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
	 * resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}
/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}
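/*
 * The exact error matters to shrink_memcg_cb() below: 0 means the entry
 * was written back and freed, -EEXIST means the folio was already in the
 * swapcache (it turned hot), and -ENOMEM covers both folio allocation
 * failure and losing the race against a concurrent invalidation.
 */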
/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}
static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at the
	 * same time (before any of them made reasonable progress), the total
	 * number of reclaimed objects might be more than the number of unprotected
	 * objects (i.e. the reclaimers will reclaim into the protected area of the
	 * zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}
static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	/*
	 * Subtract from the lru size an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 *
	 * The memory saving factor calculated here takes same-filled pages into
	 * account, but those are not freeable since they almost occupy no
	 * space. Hence, we may scale nr_freeable down a little bit more than we
	 * should if we have a lot of same-filled pages.
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
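/*
 * Scaling example: 1000 freeable objects compressed 3:1 (nr_stored = 300
 * pages backed by nr_backing = 100 pages of pool memory) are reported as
 * mult_frac(1000, 100, 300) = 333 objects, so a well-compressing pool
 * looks smaller to reclaim and is shrunk less aggressively.
 */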
static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}
static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -EINVAL;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	return shrunk ? 0 : -EAGAIN;
}
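/*
 * Note that nr_to_walk is 1, so each shrink_memcg() call writes back at
 * most one page per NUMA node. Callers that need more progress (the
 * worker below, or the cgroup limit path in zswap_store()) apply
 * pressure by calling it repeatedly.
 */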
static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_shrink_lock);
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		memcg = zswap_next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_shrink_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			zswap_next_shrink = NULL;
			spin_unlock(&zswap_shrink_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_shrink_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}
/*********************************
* same-filled functions
**********************************/
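/*
 * Example: a page of all zeroes, or any page whose contents repeat a
 * single word-sized pattern, is stored as just that value in
 * entry->value with entry->length == 0 and no zpool allocation at all.
 */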
static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
{
	unsigned long *data;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
	bool ret = false;

	data = kmap_local_folio(folio, 0);
	val = data[0];

	if (val != data[last_pos])
		goto out;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != data[pos])
			goto out;
	}

	*value = val;
	ret = true;
out:
	kunmap_local(data);
	return ret;
}

static void zswap_fill_folio(struct folio *folio, unsigned long value)
{
	unsigned long *data = kmap_local_folio(folio, 0);

	memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
	kunmap_local(data);
}
/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_is_folio_same_filled(folio, &value)) {
		entry->length = 0;
		entry->value = value;
		atomic_inc(&zswap_same_filled_pages);
		goto store_entry;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 * The publishing order matters to prevent writeback from seeing
	 * an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	count_vm_event(ZSWPOUT);

	return true;

store_failed:
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
put_pool:
		zswap_pool_put(entry->pool);
	}
freepage:
	zswap_entry_cache_free(entry);
reject:
	obj_cgroup_put(objcg);
	if (zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
	return false;
}
bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	if (entry->length)
		zswap_decompress(entry, folio);
	else
		zswap_fill_folio(folio, entry->value);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}
void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}
int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
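/*
 * Example: a 1GiB swap device has 262144 4K slots; assuming the usual
 * SWAP_ADDRESS_SPACE_PAGES of 16384 (1 << 14), that swap type gets
 * DIV_ROUND_UP(262144, 16384) = 16 xarrays.
 */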
void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}
/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}
static int __init zswap_init(void)
{
	/* skip init if zswap is disabled when system starts up */
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_DESCRIPTION("Compressed cache for swap pages");