// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

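/*
 * Illustrative usage (assumed standard module-param behaviour, not
 * defined in this file): the parameters registered here appear under
 * /sys/module/zswap/parameters/, e.g.:
 *
 *   echo Y > /sys/module/zswap/parameters/enabled
 *
 * The param ops validate such writes before they take effect.
 */
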
/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

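/*
 * Illustrative usage (assumed standard module-param behaviour): the same
 * knobs can be chosen on the kernel command line, e.g.:
 *
 *   zswap.enabled=1 zswap.compressor=zstd zswap.zpool=zsmalloc
 */
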
/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool is_zswap_enabled(void)
{
	return zswap_enabled;
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page, length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

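/*
 * Worked example (assuming SWAP_ADDRESS_SPACE_SHIFT == 14): a slot at
 * offset 50000 on swap device 1 maps to zswap_trees[1][50000 >> 14],
 * i.e. tree 3. Each xarray thus covers 16384 slots (64MiB of a
 * 4KiB-page swapfile), bounding lock contention between concurrent
 * stores, loads and invalidations on the same device.
 */
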
#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

/*********************************
* pool functions
**********************************/

static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

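/*
 * Pool lifetime, in brief (a summary of the mechanics used below, not a
 * new mechanism): a pool is created holding one ref on behalf of being
 * the current pool. A param change drops that ref via percpu_ref_kill();
 * once readers drain, __zswap_pool_empty() unlinks the pool and defers
 * destruction to __zswap_pool_release(). If the same type/compressor is
 * selected again before release, the ref is revived with
 * percpu_ref_resurrect() instead of rebuilding the pool.
 */
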
static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		int i;

		for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
			total += zpool_get_total_pages(pool->zpools[i]);
	}
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

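/*
 * Worked example of the hysteresis above (illustrative numbers): with
 * 16GiB of RAM (4194304 pages), max_pool_percent=20 and
 * accept_threshold_percent=90, stores start being rejected once the pool
 * reaches 838860 pages (~3.2GiB) and resume only after it shrinks below
 * 754974 pages (~2.9GiB), so the pool doesn't bounce off the limit.
 */
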
/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}

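/*
 * Decay example (illustrative numbers): with lru_size == 1000, once
 * nr_zswap_protected exceeds lru_size / 4 == 250, each further increment
 * halves it instead (e.g. 260 -> 130), so the protection estimate tracks
 * recent swapins without growing without bound.
 */
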
static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg)
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	return entry->pool->zpools[hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS))];
}

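/*
 * For example, hash_ptr(entry, ilog2(32)) yields a stable 5-bit index,
 * so each entry consistently maps to one of the pool's 32 zpools while
 * different entries spread across all of them, reducing contention on
 * any single zpool's internal locks.
 */
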
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&zswap_list_lru, entry);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little odd that we send an asynchronous request and
	 * then wait for its completion synchronously; the process is in fact
	 * synchronous.
	 * In theory, acomp supports sending multiple requests on one acomp
	 * instance and having them complete simultaneously. But zswap stores
	 * and loads page by page, and within one thread there is no way to
	 * issue the second page before the first is done. Different threads
	 * running on different CPUs use different acomp instances, though,
	 * so multiple threads can still (de)compress in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = zswap_find_zpool(entry);
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

static void zswap_decompress(struct zswap_entry *entry, struct page *page)
{
	struct zpool *zpool = zswap_find_zpool(entry);
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
	 * resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally write back cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, &folio->page);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 * So rotate it before dropping the lock. If the entry is
	 * written back or invalidated, the free path will unlink
	 * it. For failures, rotation is the right thing as well.
	 *
	 * Temporary failures, where the same entry should be tried
	 * again immediately, almost never happen for this shrinker.
	 * We don't do any trylocking; -ENOMEM comes closest,
	 * but that's extremely rare and doesn't happen spuriously
	 * either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	spin_lock(lock);
	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at the
	 * same time (before any of them made reasonable progress), the total
	 * number of reclaimed objects might be more than the number of unprotected
	 * objects (i.e. the reclaimers will reclaim into the protected area of the
	 * zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
					  &encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	/*
	 * Subtract the lru size by an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 *
	 * The memory saving factor calculated here takes same-filled pages into
	 * account, but those are not freeable since they almost occupy no
	 * space. Hence, we may scale nr_freeable down a little bit more than we
	 * should if we have a lot of same-filled pages.
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

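/*
 * Worked example (illustrative numbers): with nr_freeable == 1000
 * entries backed by nr_backing == 250 pages for nr_stored == 1000 pages
 * (a 4:1 compression ratio), the shrinker reports
 * mult_frac(1000, 250, 1000) == 250 freeable objects, so better
 * compression proportionally dampens shrinker pressure.
 */
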
static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -EINVAL;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_shrink_lock);
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		memcg = zswap_next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_shrink_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			zswap_next_shrink = NULL;
			spin_unlock(&zswap_shrink_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_shrink_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* same-filled functions
**********************************/
static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
	bool ret = false;

	page = kmap_local_folio(folio, 0);
	val = page[0];

	if (val != page[last_pos])
		goto out;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			goto out;
	}

	*value = val;
	ret = true;
out:
	kunmap_local(page);
	return ret;
}

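/*
 * E.g. an all-zero page, or any page filled with one repeating
 * unsigned long pattern, is recorded as just entry->value with length 0
 * and never touches the compressor. Comparing against the last word
 * first cheaply rejects most non-uniform pages before the full scan.
 */
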
static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

/*********************************
* main API
**********************************/

bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_is_folio_same_filled(folio, &value)) {
		entry->length = 0;
		entry->value = value;
		atomic_inc(&zswap_same_filled_pages);
		goto store_entry;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 * The publishing order matters to prevent writeback from seeing
	 * an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	count_vm_event(ZSWPOUT);

	return true;

store_failed:
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(zswap_find_zpool(entry), entry->handle);
put_pool:
		zswap_pool_put(entry->pool);
	}
freepage:
	zswap_entry_cache_free(entry);
reject:
	obj_cgroup_put(objcg);
	if (zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
	return false;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;
	u8 *dst;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	if (entry->length)
		zswap_decompress(entry, page);
	else {
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	return true;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

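/*
 * For example (assuming SWAP_ADDRESS_SPACE_PAGES == 16384): a 1GiB swap
 * device has 262144 4KiB slots and therefore gets
 * DIV_ROUND_UP(262144, 16384) == 16 xarrays at swapon time.
 */
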
void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}

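/*
 * Illustrative usage (assuming debugfs is mounted at the usual place):
 * the counters registered above can be read directly, e.g.:
 *
 *   grep . /sys/kernel/debug/zswap/*
 */
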
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpools[0]));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");