mm/zswap.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * zswap.c - zswap driver file
4  *
5  * zswap is a cache that takes pages that are in the process
6  * of being swapped out and attempts to compress and store them in a
7  * RAM-based memory pool.  This can result in a significant I/O reduction on
8  * the swap device and, in the case where decompressing from RAM is faster
9  * than reading from the swap device, can also improve workload performance.
10  *
11  * Copyright (C) 2012  Seth Jennings <[email protected]>
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/rbtree.h>
24 #include <linux/swap.h>
25 #include <linux/crypto.h>
26 #include <linux/scatterlist.h>
27 #include <linux/mempolicy.h>
28 #include <linux/mempool.h>
29 #include <linux/zpool.h>
30 #include <crypto/acompress.h>
31 #include <linux/zswap.h>
32 #include <linux/mm_types.h>
33 #include <linux/page-flags.h>
34 #include <linux/swapops.h>
35 #include <linux/writeback.h>
36 #include <linux/pagemap.h>
37 #include <linux/workqueue.h>
38 #include <linux/list_lru.h>
39
40 #include "swap.h"
41 #include "internal.h"
42
43 /*********************************
44 * statistics
45 **********************************/
46 /* Total bytes used by the compressed storage */
47 u64 zswap_pool_total_size;
48 /* The number of compressed pages currently stored in zswap */
49 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
50 /* The number of same-value filled pages currently stored in zswap */
51 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
52
53 /*
54  * The statistics below are not protected from concurrent access for
55  * performance reasons, so they may not be 100% accurate.  However,
56  * they do provide useful information on roughly how many times a
57  * certain event is occurring.
58 */
59
60 /* Pool limit was hit (see zswap_max_pool_percent) */
61 static u64 zswap_pool_limit_hit;
62 /* Pages written back when pool limit was reached */
63 static u64 zswap_written_back_pages;
64 /* Store failed due to a reclaim failure after pool limit was reached */
65 static u64 zswap_reject_reclaim_fail;
66 /* Store failed due to compression algorithm failure */
67 static u64 zswap_reject_compress_fail;
68 /* Compressed page was too big for the allocator to (optimally) store */
69 static u64 zswap_reject_compress_poor;
70 /* Store failed because underlying allocator could not get memory */
71 static u64 zswap_reject_alloc_fail;
72 /* Store failed because the entry metadata could not be allocated (rare) */
73 static u64 zswap_reject_kmemcache_fail;
74
75 /* Shrinker work queue */
76 static struct workqueue_struct *shrink_wq;
77 /* Pool limit was hit, we need to calm down */
78 static bool zswap_pool_reached_full;
79
80 /*********************************
81 * tunables
82 **********************************/
83
84 #define ZSWAP_PARAM_UNSET ""
85
86 static int zswap_setup(void);
87
88 /* Enable/disable zswap */
89 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
90 static int zswap_enabled_param_set(const char *,
91                                    const struct kernel_param *);
92 static const struct kernel_param_ops zswap_enabled_param_ops = {
93         .set =          zswap_enabled_param_set,
94         .get =          param_get_bool,
95 };
96 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
97
98 /* Crypto compressor to use */
99 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
100 static int zswap_compressor_param_set(const char *,
101                                       const struct kernel_param *);
102 static const struct kernel_param_ops zswap_compressor_param_ops = {
103         .set =          zswap_compressor_param_set,
104         .get =          param_get_charp,
105         .free =         param_free_charp,
106 };
107 module_param_cb(compressor, &zswap_compressor_param_ops,
108                 &zswap_compressor, 0644);
109
110 /* Compressed storage zpool to use */
111 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
112 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
113 static const struct kernel_param_ops zswap_zpool_param_ops = {
114         .set =          zswap_zpool_param_set,
115         .get =          param_get_charp,
116         .free =         param_free_charp,
117 };
118 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
119
120 /* The maximum percentage of memory that the compressed pool can occupy */
121 static unsigned int zswap_max_pool_percent = 20;
122 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
123
124 /* The threshold for accepting new pages after the max_pool_percent was hit */
125 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
126 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
127                    uint, 0644);
128
129 /*
130  * Enable/disable handling same-value filled pages (enabled by default).
131  * If disabled, every page is considered non-same-value filled.
132  */
133 static bool zswap_same_filled_pages_enabled = true;
134 module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
135                    bool, 0644);
136
137 /* Enable/disable handling non-same-value filled pages (enabled by default) */
138 static bool zswap_non_same_filled_pages_enabled = true;
139 module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
140                    bool, 0644);
141
142 /* Number of zpools in zswap_pool (empirically determined for scalability) */
143 #define ZSWAP_NR_ZPOOLS 32
144
145 /* Enable/disable memory pressure-based shrinker. */
146 static bool zswap_shrinker_enabled = IS_ENABLED(
147                 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
148 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
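/*
 * Illustration, not part of the driver: with the 0644 permissions used
 * above, these knobs are exposed under /sys/module/zswap/parameters/ and
 * can be changed at runtime, e.g. (hypothetical values, typical sysfs
 * layout assumed):
 *
 *   echo 1    > /sys/module/zswap/parameters/enabled
 *   echo zstd > /sys/module/zswap/parameters/compressor
 *   echo 25   > /sys/module/zswap/parameters/max_pool_percent
 */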
149
150 bool is_zswap_enabled(void)
151 {
152         return zswap_enabled;
153 }
154
155 /*********************************
156 * data structures
157 **********************************/
158
159 struct crypto_acomp_ctx {
160         struct crypto_acomp *acomp;
161         struct acomp_req *req;
162         struct crypto_wait wait;
163         u8 *buffer;
164         struct mutex mutex;
165 };
166
167 /*
168  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
169  * The only case where lru_lock is not acquired while holding tree.lock is
170  * when a zswap_entry is taken off the lru for writeback; in that case it
171  * needs to be verified that it is still valid in the tree.
172  */
173 struct zswap_pool {
174         struct zpool *zpools[ZSWAP_NR_ZPOOLS];
175         struct crypto_acomp_ctx __percpu *acomp_ctx;
176         struct kref kref;
177         struct list_head list;
178         struct work_struct release_work;
179         struct work_struct shrink_work;
180         struct hlist_node node;
181         char tfm_name[CRYPTO_MAX_ALG_NAME];
182         struct list_lru list_lru;
183         struct mem_cgroup *next_shrink;
184         struct shrinker *shrinker;
185         atomic_t nr_stored;
186 };
187
188 /*
189  * struct zswap_entry
190  *
191  * This structure contains the metadata for tracking a single compressed
192  * page within zswap.
193  *
194  * rbnode - links the entry into red-black tree for the appropriate swap type
195  * swpentry - associated swap entry, the offset indexes into the red-black tree
196  * length - the length in bytes of the compressed page data.  Needed during
197  *          decompression. For a same-value filled page, length is 0, and both
198  *          pool and lru are invalid and must be ignored.
199  * pool - the zswap_pool the entry's data is in
200  * handle - zpool allocation handle that stores the compressed page data
201  * value - the value that a same-value filled page is filled with
202  * objcg - the obj_cgroup that the compressed memory is charged to
203  * lru - handle to the pool's lru used to evict pages.
204  */
205 struct zswap_entry {
206         struct rb_node rbnode;
207         swp_entry_t swpentry;
208         unsigned int length;
209         struct zswap_pool *pool;
210         union {
211                 unsigned long handle;
212                 unsigned long value;
213         };
214         struct obj_cgroup *objcg;
215         struct list_head lru;
216 };
217
218 struct zswap_tree {
219         struct rb_root rbroot;
220         spinlock_t lock;
221 };
222
223 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
224 static unsigned int nr_zswap_trees[MAX_SWAPFILES];
225
226 /* RCU-protected iteration */
227 static LIST_HEAD(zswap_pools);
228 /* protects zswap_pools list modification */
229 static DEFINE_SPINLOCK(zswap_pools_lock);
230 /* pool counter to provide unique names to zpool */
231 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
232
233 enum zswap_init_type {
234         ZSWAP_UNINIT,
235         ZSWAP_INIT_SUCCEED,
236         ZSWAP_INIT_FAILED
237 };
238
239 static enum zswap_init_type zswap_init_state;
240
241 /* used to ensure the integrity of initialization */
242 static DEFINE_MUTEX(zswap_init_lock);
243
244 /* init completed, but couldn't create the initial pool */
245 static bool zswap_has_pool;
246
247 /*********************************
248 * helpers and fwd declarations
249 **********************************/
250
251 static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
252 {
253         return &zswap_trees[swp_type(swp)][swp_offset(swp)
254                 >> SWAP_ADDRESS_SPACE_SHIFT];
255 }
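/*
 * Illustrative note (sizes taken from the current swap code, not defined
 * here): each rb-tree covers a fixed range of 1 << SWAP_ADDRESS_SPACE_SHIFT
 * swap slots, so one swap device is split across multiple trees (see
 * nr_zswap_trees above), each with its own lock, which bounds tree depth
 * and spreads lock contention.
 */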
256
257 #define zswap_pool_debug(msg, p)                                \
258         pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
259                  zpool_get_type((p)->zpools[0]))
260
261 static bool zswap_is_full(void)
262 {
263         return totalram_pages() * zswap_max_pool_percent / 100 <
264                         DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
265 }
266
267 static bool zswap_can_accept(void)
268 {
269         return totalram_pages() * zswap_accept_thr_percent / 100 *
270                                 zswap_max_pool_percent / 100 >
271                         DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
272 }
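/*
 * Worked example with assumed numbers: on a 16 GiB machine (4194304 4 KiB
 * pages) with the default tunables above (max_pool_percent = 20,
 * accept_thr_percent = 90), zswap_is_full() reports full once the
 * compressed pool exceeds ~838860 pages (~3.2 GiB), and zswap_can_accept()
 * only allows new stores again once it drops below ~754974 pages
 * (~2.9 GiB), giving the pool some hysteresis around the limit.
 */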
273
274 static u64 get_zswap_pool_size(struct zswap_pool *pool)
275 {
276         u64 pool_size = 0;
277         int i;
278
279         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
280                 pool_size += zpool_get_total_size(pool->zpools[i]);
281
282         return pool_size;
283 }
284
285 static void zswap_update_total_size(void)
286 {
287         struct zswap_pool *pool;
288         u64 total = 0;
289
290         rcu_read_lock();
291
292         list_for_each_entry_rcu(pool, &zswap_pools, list)
293                 total += get_zswap_pool_size(pool);
294
295         rcu_read_unlock();
296
297         zswap_pool_total_size = total;
298 }
299
300 /*********************************
301 * pool functions
302 **********************************/
303
304 static void zswap_alloc_shrinker(struct zswap_pool *pool);
305 static void shrink_worker(struct work_struct *w);
306
307 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
308 {
309         int i;
310         struct zswap_pool *pool;
311         char name[38]; /* 'zswap' + 32 char (max) num + \0 */
312         gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
313         int ret;
314
315         if (!zswap_has_pool) {
316                 /* if either is unset, pool initialization failed, and we
317                  * need both params to be set correctly before trying to
318                  * create a pool.
319                  */
320                 if (!strcmp(type, ZSWAP_PARAM_UNSET))
321                         return NULL;
322                 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
323                         return NULL;
324         }
325
326         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
327         if (!pool)
328                 return NULL;
329
330         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
331                 /* unique name for each pool specifically required by zsmalloc */
332                 snprintf(name, 38, "zswap%x",
333                          atomic_inc_return(&zswap_pools_count));
334
335                 pool->zpools[i] = zpool_create_pool(type, name, gfp);
336                 if (!pool->zpools[i]) {
337                         pr_err("%s zpool not available\n", type);
338                         goto error;
339                 }
340         }
341         pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
342
343         strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
344
345         pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
346         if (!pool->acomp_ctx) {
347                 pr_err("percpu alloc failed\n");
348                 goto error;
349         }
350
351         ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
352                                        &pool->node);
353         if (ret)
354                 goto error;
355
356         zswap_alloc_shrinker(pool);
357         if (!pool->shrinker)
358                 goto error;
359
360         pr_debug("using %s compressor\n", pool->tfm_name);
361
362         /* being the current pool takes 1 ref; this func expects the
363          * caller to always add the new pool as the current pool
364          */
365         kref_init(&pool->kref);
366         INIT_LIST_HEAD(&pool->list);
367         if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
368                 goto lru_fail;
369         shrinker_register(pool->shrinker);
370         INIT_WORK(&pool->shrink_work, shrink_worker);
371         atomic_set(&pool->nr_stored, 0);
372
373         zswap_pool_debug("created", pool);
374
375         return pool;
376
377 lru_fail:
378         list_lru_destroy(&pool->list_lru);
379         shrinker_free(pool->shrinker);
380 error:
381         if (pool->acomp_ctx)
382                 free_percpu(pool->acomp_ctx);
383         while (i--)
384                 zpool_destroy_pool(pool->zpools[i]);
385         kfree(pool);
386         return NULL;
387 }
388
389 static struct zswap_pool *__zswap_pool_create_fallback(void)
390 {
391         bool has_comp, has_zpool;
392
393         has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
394         if (!has_comp && strcmp(zswap_compressor,
395                                 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
396                 pr_err("compressor %s not available, using default %s\n",
397                        zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
398                 param_free_charp(&zswap_compressor);
399                 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
400                 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
401         }
402         if (!has_comp) {
403                 pr_err("default compressor %s not available\n",
404                        zswap_compressor);
405                 param_free_charp(&zswap_compressor);
406                 zswap_compressor = ZSWAP_PARAM_UNSET;
407         }
408
409         has_zpool = zpool_has_pool(zswap_zpool_type);
410         if (!has_zpool && strcmp(zswap_zpool_type,
411                                  CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
412                 pr_err("zpool %s not available, using default %s\n",
413                        zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
414                 param_free_charp(&zswap_zpool_type);
415                 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
416                 has_zpool = zpool_has_pool(zswap_zpool_type);
417         }
418         if (!has_zpool) {
419                 pr_err("default zpool %s not available\n",
420                        zswap_zpool_type);
421                 param_free_charp(&zswap_zpool_type);
422                 zswap_zpool_type = ZSWAP_PARAM_UNSET;
423         }
424
425         if (!has_comp || !has_zpool)
426                 return NULL;
427
428         return zswap_pool_create(zswap_zpool_type, zswap_compressor);
429 }
430
431 static void zswap_pool_destroy(struct zswap_pool *pool)
432 {
433         int i;
434
435         zswap_pool_debug("destroying", pool);
436
437         shrinker_free(pool->shrinker);
438         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
439         free_percpu(pool->acomp_ctx);
440         list_lru_destroy(&pool->list_lru);
441
442         spin_lock(&zswap_pools_lock);
443         mem_cgroup_iter_break(NULL, pool->next_shrink);
444         pool->next_shrink = NULL;
445         spin_unlock(&zswap_pools_lock);
446
447         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
448                 zpool_destroy_pool(pool->zpools[i]);
449         kfree(pool);
450 }
451
452 static void __zswap_pool_release(struct work_struct *work)
453 {
454         struct zswap_pool *pool = container_of(work, typeof(*pool),
455                                                 release_work);
456
457         synchronize_rcu();
458
459         /* nobody should have been able to get a kref... */
460         WARN_ON(kref_get_unless_zero(&pool->kref));
461
462         /* pool is now off zswap_pools list and has no references. */
463         zswap_pool_destroy(pool);
464 }
465
466 static struct zswap_pool *zswap_pool_current(void);
467
468 static void __zswap_pool_empty(struct kref *kref)
469 {
470         struct zswap_pool *pool;
471
472         pool = container_of(kref, typeof(*pool), kref);
473
474         spin_lock(&zswap_pools_lock);
475
476         WARN_ON(pool == zswap_pool_current());
477
478         list_del_rcu(&pool->list);
479
480         INIT_WORK(&pool->release_work, __zswap_pool_release);
481         schedule_work(&pool->release_work);
482
483         spin_unlock(&zswap_pools_lock);
484 }
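/*
 * A short summary of the teardown path above: when the last reference is
 * dropped, __zswap_pool_empty() unlinks the pool from zswap_pools and
 * defers destruction to __zswap_pool_release(), which waits out an RCU
 * grace period so that lockless readers still iterating zswap_pools can
 * no longer see the pool by the time it is destroyed.
 */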
485
486 static int __must_check zswap_pool_get(struct zswap_pool *pool)
487 {
488         if (!pool)
489                 return 0;
490
491         return kref_get_unless_zero(&pool->kref);
492 }
493
494 static void zswap_pool_put(struct zswap_pool *pool)
495 {
496         kref_put(&pool->kref, __zswap_pool_empty);
497 }
498
499 static struct zswap_pool *__zswap_pool_current(void)
500 {
501         struct zswap_pool *pool;
502
503         pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
504         WARN_ONCE(!pool && zswap_has_pool,
505                   "%s: no page storage pool!\n", __func__);
506
507         return pool;
508 }
509
510 static struct zswap_pool *zswap_pool_current(void)
511 {
512         assert_spin_locked(&zswap_pools_lock);
513
514         return __zswap_pool_current();
515 }
516
517 static struct zswap_pool *zswap_pool_current_get(void)
518 {
519         struct zswap_pool *pool;
520
521         rcu_read_lock();
522
523         pool = __zswap_pool_current();
524         if (!zswap_pool_get(pool))
525                 pool = NULL;
526
527         rcu_read_unlock();
528
529         return pool;
530 }
531
532 static struct zswap_pool *zswap_pool_last_get(void)
533 {
534         struct zswap_pool *pool, *last = NULL;
535
536         rcu_read_lock();
537
538         list_for_each_entry_rcu(pool, &zswap_pools, list)
539                 last = pool;
540         WARN_ONCE(!last && zswap_has_pool,
541                   "%s: no page storage pool!\n", __func__);
542         if (!zswap_pool_get(last))
543                 last = NULL;
544
545         rcu_read_unlock();
546
547         return last;
548 }
549
550 /* type and compressor must be null-terminated */
551 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
552 {
553         struct zswap_pool *pool;
554
555         assert_spin_locked(&zswap_pools_lock);
556
557         list_for_each_entry_rcu(pool, &zswap_pools, list) {
558                 if (strcmp(pool->tfm_name, compressor))
559                         continue;
560                 /* all zpools share the same type */
561                 if (strcmp(zpool_get_type(pool->zpools[0]), type))
562                         continue;
563                 /* if we can't get it, it's about to be destroyed */
564                 if (!zswap_pool_get(pool))
565                         continue;
566                 return pool;
567         }
568
569         return NULL;
570 }
571
572 /*********************************
573 * param callbacks
574 **********************************/
575
576 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
577 {
578         /* no change required */
579         if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
580                 return false;
581         return true;
582 }
583
584 /* val must be a null-terminated string */
585 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
586                              char *type, char *compressor)
587 {
588         struct zswap_pool *pool, *put_pool = NULL;
589         char *s = strstrip((char *)val);
590         int ret = 0;
591         bool new_pool = false;
592
593         mutex_lock(&zswap_init_lock);
594         switch (zswap_init_state) {
595         case ZSWAP_UNINIT:
596                 /* if this is load-time (pre-init) param setting,
597                  * don't create a pool; that's done during init.
598                  */
599                 ret = param_set_charp(s, kp);
600                 break;
601         case ZSWAP_INIT_SUCCEED:
602                 new_pool = zswap_pool_changed(s, kp);
603                 break;
604         case ZSWAP_INIT_FAILED:
605                 pr_err("can't set param, initialization failed\n");
606                 ret = -ENODEV;
607         }
608         mutex_unlock(&zswap_init_lock);
609
610         /* no need to create a new pool, return directly */
611         if (!new_pool)
612                 return ret;
613
614         if (!type) {
615                 if (!zpool_has_pool(s)) {
616                         pr_err("zpool %s not available\n", s);
617                         return -ENOENT;
618                 }
619                 type = s;
620         } else if (!compressor) {
621                 if (!crypto_has_acomp(s, 0, 0)) {
622                         pr_err("compressor %s not available\n", s);
623                         return -ENOENT;
624                 }
625                 compressor = s;
626         } else {
627                 WARN_ON(1);
628                 return -EINVAL;
629         }
630
631         spin_lock(&zswap_pools_lock);
632
633         pool = zswap_pool_find_get(type, compressor);
634         if (pool) {
635                 zswap_pool_debug("using existing", pool);
636                 WARN_ON(pool == zswap_pool_current());
637                 list_del_rcu(&pool->list);
638         }
639
640         spin_unlock(&zswap_pools_lock);
641
642         if (!pool)
643                 pool = zswap_pool_create(type, compressor);
644
645         if (pool)
646                 ret = param_set_charp(s, kp);
647         else
648                 ret = -EINVAL;
649
650         spin_lock(&zswap_pools_lock);
651
652         if (!ret) {
653                 put_pool = zswap_pool_current();
654                 list_add_rcu(&pool->list, &zswap_pools);
655                 zswap_has_pool = true;
656         } else if (pool) {
657                 /* add the possibly pre-existing pool to the end of the pools
658                  * list; if it's new (and empty) then it'll be removed and
659                  * destroyed by the put after we drop the lock
660                  */
661                 list_add_tail_rcu(&pool->list, &zswap_pools);
662                 put_pool = pool;
663         }
664
665         spin_unlock(&zswap_pools_lock);
666
667         if (!zswap_has_pool && !pool) {
668                 /* if initial pool creation failed, and this pool creation also
669                  * failed, maybe both compressor and zpool params were bad.
670                  * Allow changing this param, so pool creation will succeed
671                  * when the other param is changed. We already verified this
672                  * param is ok in the zpool_has_pool() or crypto_has_acomp()
673                  * checks above.
674                  */
675                 ret = param_set_charp(s, kp);
676         }
677
678         /* drop the ref from either the old current pool,
679          * or the new pool we failed to add
680          */
681         if (put_pool)
682                 zswap_pool_put(put_pool);
683
684         return ret;
685 }
686
687 static int zswap_compressor_param_set(const char *val,
688                                       const struct kernel_param *kp)
689 {
690         return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
691 }
692
693 static int zswap_zpool_param_set(const char *val,
694                                  const struct kernel_param *kp)
695 {
696         return __zswap_param_set(val, kp, NULL, zswap_compressor);
697 }
698
699 static int zswap_enabled_param_set(const char *val,
700                                    const struct kernel_param *kp)
701 {
702         int ret = -ENODEV;
703
704         /* if this is load-time (pre-init) param setting, only set param. */
705         if (system_state != SYSTEM_RUNNING)
706                 return param_set_bool(val, kp);
707
708         mutex_lock(&zswap_init_lock);
709         switch (zswap_init_state) {
710         case ZSWAP_UNINIT:
711                 if (zswap_setup())
712                         break;
713                 fallthrough;
714         case ZSWAP_INIT_SUCCEED:
715                 if (!zswap_has_pool)
716                         pr_err("can't enable, no pool configured\n");
717                 else
718                         ret = param_set_bool(val, kp);
719                 break;
720         case ZSWAP_INIT_FAILED:
721                 pr_err("can't enable, initialization failed\n");
722         }
723         mutex_unlock(&zswap_init_lock);
724
725         return ret;
726 }
727
728 /*********************************
729 * lru functions
730 **********************************/
731
732 /* should be called under RCU */
733 #ifdef CONFIG_MEMCG
734 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
735 {
736         return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
737 }
738 #else
739 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
740 {
741         return NULL;
742 }
743 #endif
744
745 static inline int entry_to_nid(struct zswap_entry *entry)
746 {
747         return page_to_nid(virt_to_page(entry));
748 }
749
750 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
751 {
752         atomic_long_t *nr_zswap_protected;
753         unsigned long lru_size, old, new;
754         int nid = entry_to_nid(entry);
755         struct mem_cgroup *memcg;
756         struct lruvec *lruvec;
757
758         /*
759          * Note that it is safe to use rcu_read_lock() here, even in the face of
760          * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
761          * used in list_lru lookup, only two scenarios are possible:
762          *
763          * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
764          *    new entry will be reparented to memcg's parent's list_lru.
765          * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
766          *    new entry will be added directly to memcg's parent's list_lru.
767          *
768          * Similar reasoning holds for list_lru_del().
769          */
770         rcu_read_lock();
771         memcg = mem_cgroup_from_entry(entry);
772         /* will always succeed */
773         list_lru_add(list_lru, &entry->lru, nid, memcg);
774
775         /* Update the protection area */
776         lru_size = list_lru_count_one(list_lru, nid, memcg);
777         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
778         nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
779         old = atomic_long_inc_return(nr_zswap_protected);
780         /*
781          * Decay to avoid overflow and adapt to changing workloads.
782          * This is based on LRU reclaim cost decaying heuristics.
783          */
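        /*
         * Numeric illustration (assumed sizes): with lru_size == 1000, the
         * counter is halved whenever an increment pushes it above 250, so
         * it tracks recent swapin activity without growing without bound.
         */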
784         do {
785                 new = old > lru_size / 4 ? old / 2 : old;
786         } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
787         rcu_read_unlock();
788 }
789
790 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
791 {
792         int nid = entry_to_nid(entry);
793         struct mem_cgroup *memcg;
794
795         rcu_read_lock();
796         memcg = mem_cgroup_from_entry(entry);
797         /* will always succeed */
798         list_lru_del(list_lru, &entry->lru, nid, memcg);
799         rcu_read_unlock();
800 }
801
802 void zswap_lruvec_state_init(struct lruvec *lruvec)
803 {
804         atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
805 }
806
807 void zswap_folio_swapin(struct folio *folio)
808 {
809         struct lruvec *lruvec;
810
811         if (folio) {
812                 lruvec = folio_lruvec(folio);
813                 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
814         }
815 }
816
817 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
818 {
819         struct zswap_pool *pool;
820
821         /* lock out zswap pools list modification */
822         spin_lock(&zswap_pools_lock);
823         list_for_each_entry(pool, &zswap_pools, list) {
824                 if (pool->next_shrink == memcg)
825                         pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
826         }
827         spin_unlock(&zswap_pools_lock);
828 }
829
830 /*********************************
831 * rbtree functions
832 **********************************/
833 static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
834 {
835         struct rb_node *node = root->rb_node;
836         struct zswap_entry *entry;
837         pgoff_t entry_offset;
838
839         while (node) {
840                 entry = rb_entry(node, struct zswap_entry, rbnode);
841                 entry_offset = swp_offset(entry->swpentry);
842                 if (entry_offset > offset)
843                         node = node->rb_left;
844                 else if (entry_offset < offset)
845                         node = node->rb_right;
846                 else
847                         return entry;
848         }
849         return NULL;
850 }
851
852 /*
853  * In the case that an entry with the same offset is found, a pointer to
854  * the existing entry is stored in dupentry and the function returns -EEXIST
855  */
856 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
857                         struct zswap_entry **dupentry)
858 {
859         struct rb_node **link = &root->rb_node, *parent = NULL;
860         struct zswap_entry *myentry;
861         pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
862
863         while (*link) {
864                 parent = *link;
865                 myentry = rb_entry(parent, struct zswap_entry, rbnode);
866                 myentry_offset = swp_offset(myentry->swpentry);
867                 if (myentry_offset > entry_offset)
868                         link = &(*link)->rb_left;
869                 else if (myentry_offset < entry_offset)
870                         link = &(*link)->rb_right;
871                 else {
872                         *dupentry = myentry;
873                         return -EEXIST;
874                 }
875         }
876         rb_link_node(&entry->rbnode, parent, link);
877         rb_insert_color(&entry->rbnode, root);
878         return 0;
879 }
880
881 static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
882 {
883         rb_erase(&entry->rbnode, root);
884         RB_CLEAR_NODE(&entry->rbnode);
885 }
886
887 /*********************************
888 * zswap entry functions
889 **********************************/
890 static struct kmem_cache *zswap_entry_cache;
891
892 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
893 {
894         struct zswap_entry *entry;
895         entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
896         if (!entry)
897                 return NULL;
898         RB_CLEAR_NODE(&entry->rbnode);
899         return entry;
900 }
901
902 static void zswap_entry_cache_free(struct zswap_entry *entry)
903 {
904         kmem_cache_free(zswap_entry_cache, entry);
905 }
906
907 static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
908 {
909         int i = 0;
910
911         if (ZSWAP_NR_ZPOOLS > 1)
912                 i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
913
914         return entry->pool->zpools[i];
915 }
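/*
 * Note added for illustration: hash_ptr() with ilog2(ZSWAP_NR_ZPOOLS) bits
 * yields an index in [0, ZSWAP_NR_ZPOOLS), so a given entry always maps to
 * the same zpool, while different entries are spread across all of them to
 * reduce contention on any single zpool.
 */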
916
917 /*
918  * Carries out the common pattern of freeing an entry's zpool allocation,
919  * freeing the entry itself, and decrementing the number of stored pages.
920  */
921 static void zswap_entry_free(struct zswap_entry *entry)
922 {
923         if (!entry->length)
924                 atomic_dec(&zswap_same_filled_pages);
925         else {
926                 zswap_lru_del(&entry->pool->list_lru, entry);
927                 zpool_free(zswap_find_zpool(entry), entry->handle);
928                 atomic_dec(&entry->pool->nr_stored);
929                 zswap_pool_put(entry->pool);
930         }
931         if (entry->objcg) {
932                 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
933                 obj_cgroup_put(entry->objcg);
934         }
935         zswap_entry_cache_free(entry);
936         atomic_dec(&zswap_stored_pages);
937         zswap_update_total_size();
938 }
939
940 /*
941  * The caller holds the tree lock and has looked the entry up in the tree,
942  * so it must be in the tree; remove it from the tree and free it.
943  */
944 static void zswap_invalidate_entry(struct zswap_tree *tree,
945                                    struct zswap_entry *entry)
946 {
947         zswap_rb_erase(&tree->rbroot, entry);
948         zswap_entry_free(entry);
949 }
950
951 /*********************************
952 * compressed storage functions
953 **********************************/
954 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
955 {
956         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
957         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
958         struct crypto_acomp *acomp;
959         struct acomp_req *req;
960         int ret;
961
962         mutex_init(&acomp_ctx->mutex);
963
964         acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
965         if (!acomp_ctx->buffer)
966                 return -ENOMEM;
967
968         acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
969         if (IS_ERR(acomp)) {
970                 pr_err("could not alloc crypto acomp %s : %ld\n",
971                                 pool->tfm_name, PTR_ERR(acomp));
972                 ret = PTR_ERR(acomp);
973                 goto acomp_fail;
974         }
975         acomp_ctx->acomp = acomp;
976
977         req = acomp_request_alloc(acomp_ctx->acomp);
978         if (!req) {
979                 pr_err("could not alloc crypto acomp_request %s\n",
980                        pool->tfm_name);
981                 ret = -ENOMEM;
982                 goto req_fail;
983         }
984         acomp_ctx->req = req;
985
986         crypto_init_wait(&acomp_ctx->wait);
987         /*
988          * If the acomp backend is an async zip engine, crypto_req_done() will
989          * wake up crypto_wait_req(); if the backend is scomp, the callback won't
990          * be called and crypto_wait_req() will return without blocking.
991          */
992         acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
993                                    crypto_req_done, &acomp_ctx->wait);
994
995         return 0;
996
997 req_fail:
998         crypto_free_acomp(acomp_ctx->acomp);
999 acomp_fail:
1000         kfree(acomp_ctx->buffer);
1001         return ret;
1002 }
1003
1004 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
1005 {
1006         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
1007         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
1008
1009         if (!IS_ERR_OR_NULL(acomp_ctx)) {
1010                 if (!IS_ERR_OR_NULL(acomp_ctx->req))
1011                         acomp_request_free(acomp_ctx->req);
1012                 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
1013                         crypto_free_acomp(acomp_ctx->acomp);
1014                 kfree(acomp_ctx->buffer);
1015         }
1016
1017         return 0;
1018 }
1019
1020 static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
1021 {
1022         struct crypto_acomp_ctx *acomp_ctx;
1023         struct scatterlist input, output;
1024         unsigned int dlen = PAGE_SIZE;
1025         unsigned long handle;
1026         struct zpool *zpool;
1027         char *buf;
1028         gfp_t gfp;
1029         int ret;
1030         u8 *dst;
1031
1032         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1033
1034         mutex_lock(&acomp_ctx->mutex);
1035
1036         dst = acomp_ctx->buffer;
1037         sg_init_table(&input, 1);
1038         sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1039
1040         /*
1041          * We need PAGE_SIZE * 2 here since data may occasionally expand during
1042          * compression, and hardware accelerators may not check the dst buffer
1043          * size, so give the dst buffer enough room to avoid a buffer overflow.
1044          */
1045         sg_init_one(&output, dst, PAGE_SIZE * 2);
1046         acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1047
1048         /*
1049          * It may look a little silly that we submit an asynchronous request
1050          * and then wait for its completion synchronously; the operation is
1051          * synchronous in practice.
1052          * Theoretically, acomp lets users submit multiple requests on a single
1053          * acomp instance and have them completed concurrently.  But zswap
1054          * stores and loads page by page, so within one thread doing zswap
1055          * there is no way to submit a second page before the first one is
1056          * done.
1057          * Different threads running on different CPUs use different acomp
1058          * instances, however, so multiple threads can still (de)compress in parallel.
1059          */
1060         ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1061         dlen = acomp_ctx->req->dlen;
1062         if (ret) {
1063                 zswap_reject_compress_fail++;
1064                 goto unlock;
1065         }
1066
1067         zpool = zswap_find_zpool(entry);
1068         gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1069         if (zpool_malloc_support_movable(zpool))
1070                 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1071         ret = zpool_malloc(zpool, dlen, gfp, &handle);
1072         if (ret == -ENOSPC) {
1073                 zswap_reject_compress_poor++;
1074                 goto unlock;
1075         }
1076         if (ret) {
1077                 zswap_reject_alloc_fail++;
1078                 goto unlock;
1079         }
1080
1081         buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1082         memcpy(buf, dst, dlen);
1083         zpool_unmap_handle(zpool, handle);
1084
1085         entry->handle = handle;
1086         entry->length = dlen;
1087
1088 unlock:
1089         mutex_unlock(&acomp_ctx->mutex);
1090         return ret == 0;
1091 }
1092
1093 static void zswap_decompress(struct zswap_entry *entry, struct page *page)
1094 {
1095         struct zpool *zpool = zswap_find_zpool(entry);
1096         struct scatterlist input, output;
1097         struct crypto_acomp_ctx *acomp_ctx;
1098         u8 *src;
1099
1100         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1101         mutex_lock(&acomp_ctx->mutex);
1102
1103         src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
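        /*
         * If the zpool backend cannot keep a handle mapped across a possibly
         * sleeping decompression call, bounce the compressed data through the
         * per-CPU buffer and drop the mapping up front.
         */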
1104         if (!zpool_can_sleep_mapped(zpool)) {
1105                 memcpy(acomp_ctx->buffer, src, entry->length);
1106                 src = acomp_ctx->buffer;
1107                 zpool_unmap_handle(zpool, entry->handle);
1108         }
1109
1110         sg_init_one(&input, src, entry->length);
1111         sg_init_table(&output, 1);
1112         sg_set_page(&output, page, PAGE_SIZE, 0);
1113         acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1114         BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1115         BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1116         mutex_unlock(&acomp_ctx->mutex);
1117
1118         if (zpool_can_sleep_mapped(zpool))
1119                 zpool_unmap_handle(zpool, entry->handle);
1120 }
1121
1122 /*********************************
1123 * writeback code
1124 **********************************/
1125 /*
1126  * Attempts to free an entry by adding a folio to the swap cache,
1127  * decompressing the entry data into the folio, and issuing a
1128  * bio write to write the folio back to the swap device.
1129  *
1130  * This can be thought of as a "resumed writeback" of the folio
1131  * to the swap device.  We are basically resuming the same swap
1132  * writeback path that was intercepted with the zswap_store()
1133  * in the first place.  After the folio has been decompressed into
1134  * the swap cache, the compressed version stored by zswap can be
1135  * freed.
1136  */
1137 static int zswap_writeback_entry(struct zswap_entry *entry,
1138                                  swp_entry_t swpentry)
1139 {
1140         struct zswap_tree *tree;
1141         struct folio *folio;
1142         struct mempolicy *mpol;
1143         bool folio_was_allocated;
1144         struct writeback_control wbc = {
1145                 .sync_mode = WB_SYNC_NONE,
1146         };
1147
1148         /* try to allocate swap cache folio */
1149         mpol = get_task_policy(current);
1150         folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1151                                 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1152         if (!folio)
1153                 return -ENOMEM;
1154
1155         /*
1156          * Found an existing folio, so we raced with swapin or a concurrent
1157          * shrinker. We generally write back cold folios from zswap, and
1158          * swapin means the folio just became hot, so skip this folio.
1159          * For the unlikely concurrent shrinker case, the entry will be
1160          * unlinked and freed when invalidated by the concurrent shrinker anyway.
1161          */
1162         if (!folio_was_allocated) {
1163                 folio_put(folio);
1164                 return -EEXIST;
1165         }
1166
1167         /*
1168          * folio is locked, and the swapcache is now secured against
1169          * concurrent swapping to and from the slot, and concurrent
1170          * swapoff, so we can safely dereference the zswap tree here.
1171          * Verify that the swap entry hasn't been invalidated and recycled
1172          * behind our backs, to avoid overwriting a new swap folio with
1173          * old compressed data. Only when this is successful can the entry
1174          * be dereferenced.
1175          */
1176         tree = swap_zswap_tree(swpentry);
1177         spin_lock(&tree->lock);
1178         if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
1179                 spin_unlock(&tree->lock);
1180                 delete_from_swap_cache(folio);
1181                 folio_unlock(folio);
1182                 folio_put(folio);
1183                 return -ENOMEM;
1184         }
1185
1186         /* Safe to deref entry after the entry is verified above. */
1187         zswap_rb_erase(&tree->rbroot, entry);
1188         spin_unlock(&tree->lock);
1189
1190         zswap_decompress(entry, &folio->page);
1191
1192         count_vm_event(ZSWPWB);
1193         if (entry->objcg)
1194                 count_objcg_event(entry->objcg, ZSWPWB);
1195
1196         zswap_entry_free(entry);
1197
1198         /* folio is up to date */
1199         folio_mark_uptodate(folio);
1200
1201         /* move it to the tail of the inactive list after end_writeback */
1202         folio_set_reclaim(folio);
1203
1204         /* start writeback */
1205         __swap_writepage(folio, &wbc);
1206         folio_put(folio);
1207
1208         return 0;
1209 }
1210
1211 /*********************************
1212 * shrinker functions
1213 **********************************/
1214 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1215                                        spinlock_t *lock, void *arg)
1216 {
1217         struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1218         bool *encountered_page_in_swapcache = (bool *)arg;
1219         swp_entry_t swpentry;
1220         enum lru_status ret = LRU_REMOVED_RETRY;
1221         int writeback_result;
1222
1223         /*
1224          * As soon as we drop the LRU lock, the entry can be freed by
1225          * a concurrent invalidation. This means the following:
1226          *
1227          * 1. We extract the swp_entry_t to the stack, allowing
1228          *    zswap_writeback_entry() to pin the swap entry and
1229          *    then validate the zswap entry against that swap entry's
1230          *    tree using pointer value comparison. Only when that
1231          *    is successful can the entry be dereferenced.
1232          *
1233          * 2. Usually, objects are taken off the LRU for reclaim. In
1234          *    this case this isn't possible, because if reclaim fails
1235          *    for whatever reason, we have no means of knowing if the
1236          *    entry is alive to put it back on the LRU.
1237          *
1238          *    So rotate it before dropping the lock. If the entry is
1239          *    written back or invalidated, the free path will unlink
1240          *    it. For failures, rotation is the right thing as well.
1241          *
1242          *    Temporary failures, where the same entry should be tried
1243          *    again immediately, almost never happen for this shrinker.
1244          *    We don't do any trylocking; -ENOMEM comes closest,
1245          *    but that's extremely rare and doesn't happen spuriously
1246          *    either. Don't bother distinguishing this case.
1247          */
1248         list_move_tail(item, &l->list);
1249
1250         /*
1251          * Once the lru lock is dropped, the entry might get freed. The
1252          * swpentry is copied to the stack, and entry isn't deref'd again
1253          * until the entry is verified to still be alive in the tree.
1254          */
1255         swpentry = entry->swpentry;
1256
1257         /*
1258          * It's safe to drop the lock here because we return either
1259          * LRU_REMOVED_RETRY or LRU_RETRY.
1260          */
1261         spin_unlock(lock);
1262
1263         writeback_result = zswap_writeback_entry(entry, swpentry);
1264
1265         if (writeback_result) {
1266                 zswap_reject_reclaim_fail++;
1267                 ret = LRU_RETRY;
1268
1269                 /*
1270                  * Encountering a page already in swap cache is a sign that we are shrinking
1271                  * into the warmer region. We should terminate shrinking (if we're in the dynamic
1272                  * shrinker context).
1273                  */
1274                 if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1275                         ret = LRU_STOP;
1276                         *encountered_page_in_swapcache = true;
1277                 }
1278         } else {
1279                 zswap_written_back_pages++;
1280         }
1281
1282         spin_lock(lock);
1283         return ret;
1284 }
1285
1286 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1287                 struct shrink_control *sc)
1288 {
1289         struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1290         unsigned long shrink_ret, nr_protected, lru_size;
1291         struct zswap_pool *pool = shrinker->private_data;
1292         bool encountered_page_in_swapcache = false;
1293
1294         if (!zswap_shrinker_enabled ||
1295                         !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1296                 sc->nr_scanned = 0;
1297                 return SHRINK_STOP;
1298         }
1299
1300         nr_protected =
1301                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1302         lru_size = list_lru_shrink_count(&pool->list_lru, sc);
1303
1304         /*
1305          * Abort if we are shrinking into the protected region.
1306          *
1307          * This short-circuiting is necessary because if we have too many
1308          * concurrent reclaimers getting the freeable zswap object counts at the
1309          * same time (before any of them made reasonable progress), the total
1310          * number of reclaimed objects might be more than the number of unprotected
1311          * objects (i.e the reclaimers will reclaim into the protected area of the
1312          * zswap LRU).
1313          */
1314         if (nr_protected >= lru_size - sc->nr_to_scan) {
1315                 sc->nr_scanned = 0;
1316                 return SHRINK_STOP;
1317         }
1318
1319         shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
1320                 &encountered_page_in_swapcache);
1321
1322         if (encountered_page_in_swapcache)
1323                 return SHRINK_STOP;
1324
1325         return shrink_ret ? shrink_ret : SHRINK_STOP;
1326 }
1327
1328 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1329                 struct shrink_control *sc)
1330 {
1331         struct zswap_pool *pool = shrinker->private_data;
1332         struct mem_cgroup *memcg = sc->memcg;
1333         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1334         unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1335
1336         if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1337                 return 0;
1338
1339 #ifdef CONFIG_MEMCG_KMEM
1340         mem_cgroup_flush_stats(memcg);
1341         nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1342         nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1343 #else
1344         /* use pool stats instead of memcg stats */
1345         nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
1346         nr_stored = atomic_read(&pool->nr_stored);
1347 #endif
1348
1349         if (!nr_stored)
1350                 return 0;
1351
1352         nr_protected =
1353                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1354         nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
1355         /*
1356          * Subtract the lru size by an estimate of the number of pages
1357          * that should be protected.
1358          */
1359         nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1360
1361         /*
1362          * Scale the number of freeable pages by the memory saving factor.
1363          * This ensures that the better zswap compresses memory, the fewer
1364          * pages we will evict to swap (as evicting them would otherwise incur
1365          * IO for relatively small memory savings).
1366          */
1367         return mult_frac(nr_freeable, nr_backing, nr_stored);
1368 }
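/*
 * Worked example with assumed numbers: if a memcg has nr_freeable == 800
 * unprotected zswap objects, backed by nr_backing == 250 pages of
 * compressed memory for nr_stored == 1000 stored pages (a 4:1 ratio), the
 * shrinker reports 800 * 250 / 1000 == 200 freeable objects.
 */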
1369
1370 static void zswap_alloc_shrinker(struct zswap_pool *pool)
1371 {
1372         pool->shrinker =
1373                 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1374         if (!pool->shrinker)
1375                 return;
1376
1377         pool->shrinker->private_data = pool;
1378         pool->shrinker->scan_objects = zswap_shrinker_scan;
1379         pool->shrinker->count_objects = zswap_shrinker_count;
1380         pool->shrinker->batch = 0;
1381         pool->shrinker->seeks = DEFAULT_SEEKS;
1382 }
1383
1384 static int shrink_memcg(struct mem_cgroup *memcg)
1385 {
1386         struct zswap_pool *pool;
1387         int nid, shrunk = 0;
1388
1389         if (!mem_cgroup_zswap_writeback_enabled(memcg))
1390                 return -EINVAL;
1391
1392         /*
1393          * Skip zombies because their LRUs are reparented and we would be
1394          * reclaiming from the parent instead of the dead memcg.
1395          */
1396         if (memcg && !mem_cgroup_online(memcg))
1397                 return -ENOENT;
1398
1399         pool = zswap_pool_current_get();
1400         if (!pool)
1401                 return -EINVAL;
1402
1403         for_each_node_state(nid, N_NORMAL_MEMORY) {
1404                 unsigned long nr_to_walk = 1;
1405
1406                 shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
1407                                             &shrink_memcg_cb, NULL, &nr_to_walk);
1408         }
1409         zswap_pool_put(pool);
1410         return shrunk ? 0 : -EAGAIN;
1411 }
1412
1413 static void shrink_worker(struct work_struct *w)
1414 {
1415         struct zswap_pool *pool = container_of(w, typeof(*pool),
1416                                                 shrink_work);
1417         struct mem_cgroup *memcg;
1418         int ret, failures = 0;
1419
1420         /* global reclaim will select cgroups in a round-robin fashion. */
1421         do {
1422                 spin_lock(&zswap_pools_lock);
1423                 pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
1424                 memcg = pool->next_shrink;
1425
1426                 /*
1427                  * We need to retry if we have gone through a full round trip, or if we
1428                  * got an offline memcg (or else we risk undoing the effect of the
1429                  * zswap memcg offlining cleanup callback). This is not catastrophic
1430                  * per se, but it will keep the now offlined memcg hostage for a while.
1431                  *
1432                  * Note that if we got an online memcg, we will keep the extra
1433                  * reference in case the original reference obtained by mem_cgroup_iter
1434                  * is dropped by the zswap memcg offlining callback, ensuring that the
1435                  * memcg is not killed when we are reclaiming.
1436                  */
1437                 if (!memcg) {
1438                         spin_unlock(&zswap_pools_lock);
1439                         if (++failures == MAX_RECLAIM_RETRIES)
1440                                 break;
1441
1442                         goto resched;
1443                 }
1444
1445                 if (!mem_cgroup_tryget_online(memcg)) {
1446                         /* drop the reference from mem_cgroup_iter() */
1447                         mem_cgroup_iter_break(NULL, memcg);
1448                         pool->next_shrink = NULL;
1449                         spin_unlock(&zswap_pools_lock);
1450
1451                         if (++failures == MAX_RECLAIM_RETRIES)
1452                                 break;
1453
1454                         goto resched;
1455                 }
1456                 spin_unlock(&zswap_pools_lock);
1457
1458                 ret = shrink_memcg(memcg);
1459                 /* drop the extra reference */
1460                 mem_cgroup_put(memcg);
1461
1462                 if (ret == -EINVAL)
1463                         break;
1464                 if (ret && ++failures == MAX_RECLAIM_RETRIES)
1465                         break;
1466
1467 resched:
1468                 cond_resched();
1469         } while (!zswap_can_accept());
1470         zswap_pool_put(pool);
1471 }
1472
1473 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1474 {
1475         unsigned long *page;
1476         unsigned long val;
1477         unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1478
1479         page = (unsigned long *)ptr;
1480         val = page[0];
1481
1482         if (val != page[last_pos])
1483                 return 0;
1484
1485         for (pos = 1; pos < last_pos; pos++) {
1486                 if (val != page[pos])
1487                         return 0;
1488         }
1489
1490         *value = val;
1491
1492         return 1;
1493 }
1494
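/* Reconstruct a same-filled page by replicating @value across it. */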
1495 static void zswap_fill_page(void *ptr, unsigned long value)
1496 {
1497         unsigned long *page;
1498
1499         page = (unsigned long *)ptr;
1500         memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1501 }
1502
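/*
 * Store a locked swapcache folio in zswap.  Charges the owning objcg,
 * triggers reclaim when the cgroup or pool limits are hit,
 * short-circuits same-filled pages, compresses the rest, and inserts
 * the entry into the swap tree and the pool LRU.  Returns true on
 * success; on failure any stale entry at this swap offset is
 * invalidated so that writeback cannot clobber the data subsequently
 * written to the swapfile.
 */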
1503 bool zswap_store(struct folio *folio)
1504 {
1505         swp_entry_t swp = folio->swap;
1506         pgoff_t offset = swp_offset(swp);
1507         struct zswap_tree *tree = swap_zswap_tree(swp);
1508         struct zswap_entry *entry, *dupentry;
1509         struct obj_cgroup *objcg = NULL;
1510         struct mem_cgroup *memcg = NULL;
1511         struct zswap_pool *shrink_pool;
1512
1513         VM_WARN_ON_ONCE(!folio_test_locked(folio));
1514         VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1515
1516         /* Large folios aren't supported */
1517         if (folio_test_large(folio))
1518                 return false;
1519
1520         if (!zswap_enabled)
1521                 goto check_old;
1522
1523         objcg = get_obj_cgroup_from_folio(folio);
1524         if (objcg && !obj_cgroup_may_zswap(objcg)) {
1525                 memcg = get_mem_cgroup_from_objcg(objcg);
1526                 if (shrink_memcg(memcg)) {
1527                         mem_cgroup_put(memcg);
1528                         goto reject;
1529                 }
1530                 mem_cgroup_put(memcg);
1531         }
1532
1533         /* reclaim space if needed */
1534         if (zswap_is_full()) {
1535                 zswap_pool_limit_hit++;
1536                 zswap_pool_reached_full = true;
1537                 goto shrink;
1538         }
1539
1540         if (zswap_pool_reached_full) {
1541                 if (!zswap_can_accept())
1542                         goto shrink;
1543                 else
1544                         zswap_pool_reached_full = false;
1545         }
1546
1547         /* allocate entry */
1548         entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
1549         if (!entry) {
1550                 zswap_reject_kmemcache_fail++;
1551                 goto reject;
1552         }
1553
1554         if (zswap_same_filled_pages_enabled) {
1555                 unsigned long value;
1556                 u8 *src;
1557
1558                 src = kmap_local_folio(folio, 0);
1559                 if (zswap_is_page_same_filled(src, &value)) {
1560                         kunmap_local(src);
1561                         entry->length = 0;
1562                         entry->value = value;
1563                         atomic_inc(&zswap_same_filled_pages);
1564                         goto insert_entry;
1565                 }
1566                 kunmap_local(src);
1567         }
1568
1569         if (!zswap_non_same_filled_pages_enabled)
1570                 goto freepage;
1571
1572         /* if the entry is successfully added, it keeps the pool reference */
1573         entry->pool = zswap_pool_current_get();
1574         if (!entry->pool)
1575                 goto freepage;
1576
1577         if (objcg) {
1578                 memcg = get_mem_cgroup_from_objcg(objcg);
1579                 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1580                         mem_cgroup_put(memcg);
1581                         goto put_pool;
1582                 }
1583                 mem_cgroup_put(memcg);
1584         }
1585
1586         if (!zswap_compress(folio, entry))
1587                 goto put_pool;
1588
1589 insert_entry:
1590         entry->swpentry = swp;
1591         entry->objcg = objcg;
1592         if (objcg) {
1593                 obj_cgroup_charge_zswap(objcg, entry->length);
1594                 /* Account before objcg ref is moved to tree */
1595                 count_objcg_event(objcg, ZSWPOUT);
1596         }
1597
1598         /* map */
1599         spin_lock(&tree->lock);
1600         /*
1601          * The folio may have been dirtied again; invalidate the
1602          * possibly stale entry before inserting the new entry.
1603          */
1604         if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1605                 zswap_invalidate_entry(tree, dupentry);
1606                 WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
1607         }
1608         if (entry->length) {
1609                 INIT_LIST_HEAD(&entry->lru);
1610                 zswap_lru_add(&entry->pool->list_lru, entry);
1611                 atomic_inc(&entry->pool->nr_stored);
1612         }
1613         spin_unlock(&tree->lock);
1614
1615         /* update stats */
1616         atomic_inc(&zswap_stored_pages);
1617         zswap_update_total_size();
1618         count_vm_event(ZSWPOUT);
1619
1620         return true;
1621
1622 put_pool:
1623         zswap_pool_put(entry->pool);
1624 freepage:
1625         zswap_entry_cache_free(entry);
1626 reject:
1627         if (objcg)
1628                 obj_cgroup_put(objcg);
1629 check_old:
1630         /*
1631          * If the zswap store fails or zswap is disabled, we must invalidate the
1632          * possibly stale entry which was previously stored at this offset.
1633          * Otherwise, writeback could overwrite the new data in the swapfile.
1634          */
1635         spin_lock(&tree->lock);
1636         entry = zswap_rb_search(&tree->rbroot, offset);
1637         if (entry)
1638                 zswap_invalidate_entry(tree, entry);
1639         spin_unlock(&tree->lock);
1640         return false;
1641
1642 shrink:
1643         shrink_pool = zswap_pool_last_get();
1644         if (shrink_pool && !queue_work(shrink_wq, &shrink_pool->shrink_work))
1645                 zswap_pool_put(shrink_pool);
1646         goto reject;
1647 }
1648
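/*
 * Load the data for @folio from zswap, if present.  Loads are
 * exclusive: the entry is erased from the tree and freed, and the folio
 * is marked dirty so the only remaining copy of the data is not lost.
 * Returns true if the folio was filled from zswap, false otherwise.
 */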
1649 bool zswap_load(struct folio *folio)
1650 {
1651         swp_entry_t swp = folio->swap;
1652         pgoff_t offset = swp_offset(swp);
1653         struct page *page = &folio->page;
1654         struct zswap_tree *tree = swap_zswap_tree(swp);
1655         struct zswap_entry *entry;
1656         u8 *dst;
1657
1658         VM_WARN_ON_ONCE(!folio_test_locked(folio));
1659
1660         spin_lock(&tree->lock);
1661         entry = zswap_rb_search(&tree->rbroot, offset);
1662         if (!entry) {
1663                 spin_unlock(&tree->lock);
1664                 return false;
1665         }
1666         zswap_rb_erase(&tree->rbroot, entry);
1667         spin_unlock(&tree->lock);
1668
1669         if (entry->length)
1670                 zswap_decompress(entry, page);
1671         else {
1672                 dst = kmap_local_page(page);
1673                 zswap_fill_page(dst, entry->value);
1674                 kunmap_local(dst);
1675         }
1676
1677         count_vm_event(ZSWPIN);
1678         if (entry->objcg)
1679                 count_objcg_event(entry->objcg, ZSWPIN);
1680
1681         zswap_entry_free(entry);
1682
1683         folio_mark_dirty(folio);
1684
1685         return true;
1686 }
1687
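/* Invalidate and free the zswap entry, if any, stored for swap entry @swp. */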
1688 void zswap_invalidate(swp_entry_t swp)
1689 {
1690         pgoff_t offset = swp_offset(swp);
1691         struct zswap_tree *tree = swap_zswap_tree(swp);
1692         struct zswap_entry *entry;
1693
1694         spin_lock(&tree->lock);
1695         entry = zswap_rb_search(&tree->rbroot, offset);
1696         if (entry)
1697                 zswap_invalidate_entry(tree, entry);
1698         spin_unlock(&tree->lock);
1699 }
1700
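/*
 * Called at swapon time: allocate one rbtree (with its lock) per
 * SWAP_ADDRESS_SPACE_PAGES-sized chunk of the swap device.
 */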
1701 int zswap_swapon(int type, unsigned long nr_pages)
1702 {
1703         struct zswap_tree *trees, *tree;
1704         unsigned int nr, i;
1705
1706         nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1707         trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1708         if (!trees) {
1709                 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1710                 return -ENOMEM;
1711         }
1712
1713         for (i = 0; i < nr; i++) {
1714                 tree = trees + i;
1715                 tree->rbroot = RB_ROOT;
1716                 spin_lock_init(&tree->lock);
1717         }
1718
1719         nr_zswap_trees[type] = nr;
1720         zswap_trees[type] = trees;
1721         return 0;
1722 }
1723
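/* Called at swapoff time: free the (already emptied) trees for this type. */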
1724 void zswap_swapoff(int type)
1725 {
1726         struct zswap_tree *trees = zswap_trees[type];
1727         unsigned int i;
1728
1729         if (!trees)
1730                 return;
1731
1732         /* try_to_unuse() invalidated all the entries already */
1733         for (i = 0; i < nr_zswap_trees[type]; i++)
1734                 WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
1735
1736         kvfree(trees);
1737         nr_zswap_trees[type] = 0;
1738         zswap_trees[type] = NULL;
1739 }
1740
1741 /*********************************
1742 * debugfs functions
1743 **********************************/
1744 #ifdef CONFIG_DEBUG_FS
1745 #include <linux/debugfs.h>
1746
1747 static struct dentry *zswap_debugfs_root;
1748
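/*
 * The statistics counters are exposed read-only under <debugfs>/zswap/
 * (typically /sys/kernel/debug/zswap/ when debugfs is mounted there),
 * e.g.:
 *
 *   # cat /sys/kernel/debug/zswap/pool_total_size
 *   # cat /sys/kernel/debug/zswap/stored_pages
 */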
1749 static int zswap_debugfs_init(void)
1750 {
1751         if (!debugfs_initialized())
1752                 return -ENODEV;
1753
1754         zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1755
1756         debugfs_create_u64("pool_limit_hit", 0444,
1757                            zswap_debugfs_root, &zswap_pool_limit_hit);
1758         debugfs_create_u64("reject_reclaim_fail", 0444,
1759                            zswap_debugfs_root, &zswap_reject_reclaim_fail);
1760         debugfs_create_u64("reject_alloc_fail", 0444,
1761                            zswap_debugfs_root, &zswap_reject_alloc_fail);
1762         debugfs_create_u64("reject_kmemcache_fail", 0444,
1763                            zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1764         debugfs_create_u64("reject_compress_fail", 0444,
1765                            zswap_debugfs_root, &zswap_reject_compress_fail);
1766         debugfs_create_u64("reject_compress_poor", 0444,
1767                            zswap_debugfs_root, &zswap_reject_compress_poor);
1768         debugfs_create_u64("written_back_pages", 0444,
1769                            zswap_debugfs_root, &zswap_written_back_pages);
1770         debugfs_create_u64("pool_total_size", 0444,
1771                            zswap_debugfs_root, &zswap_pool_total_size);
1772         debugfs_create_atomic_t("stored_pages", 0444,
1773                                 zswap_debugfs_root, &zswap_stored_pages);
1774         debugfs_create_atomic_t("same_filled_pages", 0444,
1775                                 zswap_debugfs_root, &zswap_same_filled_pages);
1776
1777         return 0;
1778 }
1779 #else
1780 static int zswap_debugfs_init(void)
1781 {
1782         return 0;
1783 }
1784 #endif
1785
1786 /*********************************
1787 * module init and exit
1788 **********************************/
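/*
 * One-time initialization: create the zswap_entry kmem cache, register
 * the CPU hotplug callbacks for the per-CPU compression contexts,
 * create the default pool from the compressor/zpool parameters, and
 * allocate the shrink workqueue.  Invoked from zswap_init() at
 * late_initcall time.
 */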
1789 static int zswap_setup(void)
1790 {
1791         struct zswap_pool *pool;
1792         int ret;
1793
1794         zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1795         if (!zswap_entry_cache) {
1796                 pr_err("entry cache creation failed\n");
1797                 goto cache_fail;
1798         }
1799
1800         ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1801                                       "mm/zswap_pool:prepare",
1802                                       zswap_cpu_comp_prepare,
1803                                       zswap_cpu_comp_dead);
1804         if (ret)
1805                 goto hp_fail;
1806
1807         pool = __zswap_pool_create_fallback();
1808         if (pool) {
1809                 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1810                         zpool_get_type(pool->zpools[0]));
1811                 list_add(&pool->list, &zswap_pools);
1812                 zswap_has_pool = true;
1813         } else {
1814                 pr_err("pool creation failed\n");
1815                 zswap_enabled = false;
1816         }
1817
1818         shrink_wq = alloc_workqueue("zswap-shrink",
1819                         WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1820         if (!shrink_wq)
1821                 goto fallback_fail;
1822
1823         if (zswap_debugfs_init())
1824                 pr_warn("debugfs initialization failed\n");
1825         zswap_init_state = ZSWAP_INIT_SUCCEED;
1826         return 0;
1827
1828 fallback_fail:
1829         if (pool)
1830                 zswap_pool_destroy(pool);
1831 hp_fail:
1832         kmem_cache_destroy(zswap_entry_cache);
1833 cache_fail:
1834         /* if built-in, we aren't unloaded on failure; don't allow use */
1835         zswap_init_state = ZSWAP_INIT_FAILED;
1836         zswap_enabled = false;
1837         return -ENOMEM;
1838 }
1839
1840 static int __init zswap_init(void)
1841 {
1842         if (!zswap_enabled)
1843                 return 0;
1844         return zswap_setup();
1845 }
1846 /* must be late so crypto has time to come up */
1847 late_initcall(zswap_init);
1848
1849 MODULE_AUTHOR("Seth Jennings <[email protected]>");
1850 MODULE_DESCRIPTION("Compressed cache for swap pages");