// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool is_zswap_enabled(void)
{
	return zswap_enabled;
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_lru list_lru;
	struct mem_cgroup *next_shrink;
	struct shrinker *shrinker;
	atomic_t nr_stored;
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback. The lock for the
 *            zswap_tree structure that contains the entry must be held while
 *            changing the refcount. Since the lock must be held, there is
 *            no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	struct rb_node rbnode;
	swp_entry_t swpentry;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

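/*
 * Each swapfile is covered by multiple zswap trees, one per
 * SWAP_ADDRESS_SPACE_PAGES-sized range of offsets, to reduce contention
 * on the per-tree lock.
 */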
static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

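/*
 * The pool is considered full once the compressed storage exceeds
 * zswap_max_pool_percent percent of total RAM.
 */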
static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

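/*
 * Once the pool limit has been hit, stores stay rejected until usage drops
 * back below accept_threshold_percent of the maximum pool size. This
 * hysteresis avoids flapping between accepting and rejecting stores right
 * at the limit.
 */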
static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static u64 get_zswap_pool_size(struct zswap_pool *pool)
{
	u64 pool_size = 0;
	int i;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		pool_size += zpool_get_total_size(pool->zpools[i]);

	return pool_size;
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += get_zswap_pool_size(pool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;

	/* lock out zswap pools list modification */
	spin_lock(&zswap_pools_lock);
	list_for_each_entry(pool, &zswap_pools, list) {
		if (pool->next_shrink == memcg)
			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
	}
	spin_unlock(&zswap_pools_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* zswap lruvec functions
**********************************/
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	lruvec = folio_lruvec(folio);
	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
}

/*********************************
* lru functions
**********************************/
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			   struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}

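/*
 * Entries are spread across the pool's zpools by hashing the entry pointer,
 * so allocations don't all contend on a single zpool's internal locks.
 */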
static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	int i = 0;

	if (ZSWAP_NR_ZPOOLS > 1)
		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

	return entry->pool->zpools[i];
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&entry->pool->list_lru, entry);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		atomic_dec(&entry->pool->nr_stored);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	WARN_ON_ONCE(!entry->refcount);
	entry->refcount++;
}

/* caller must hold the tree lock */
static void zswap_entry_put(struct zswap_entry *entry)
{
	WARN_ON_ONCE(!entry->refcount);
	if (--entry->refcount == 0) {
		WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
		zswap_entry_free(entry);
	}
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(entry);
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg);

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	struct zswap_pool *pool = shrinker->private_data;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&pool->list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at the
	 * same time (before any of them made reasonable progress), the total
	 * number of reclaimed objects might be more than the number of unprotected
	 * objects (i.e. the reclaimers will reclaim into the protected area of the
	 * zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

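/*
 * Report how many objects the shrinker could usefully reclaim: the
 * unprotected part of the LRU, scaled by the pool's compression ratio.
 * For example, at a 3:1 ratio, writing back three stored pages frees only
 * about one page of memory, so only a third of the freeable objects are
 * reported.
 */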
static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct zswap_pool *pool = shrinker->private_data;
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

#ifdef CONFIG_MEMCG_KMEM
	mem_cgroup_flush_stats(memcg);
	nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
#else
	/* use pool stats instead of memcg stats */
	nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
	nr_stored = atomic_read(&pool->nr_stored);
#endif

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
	/*
	 * Subtract from the lru size an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static void zswap_alloc_shrinker(struct zswap_pool *pool)
{
	pool->shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!pool->shrinker)
		return;

	pool->shrinker->private_data = pool;
	pool->shrinker->scan_objects = zswap_shrinker_scan;
	pool->shrinker->count_objects = zswap_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->seeks = DEFAULT_SEEKS;
}

/*********************************
* per-cpu code
**********************************/
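/*
 * CPU hotplug callbacks: each online CPU gets its own acomp transform,
 * request, and a PAGE_SIZE * 2 scratch buffer, so (de)compression on
 * different CPUs can proceed in parallel without sharing state.
 */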
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * If the backend of acomp is an async zip, crypto_req_done() will wake
	 * up crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, and crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Rotate the entry to the tail before unlocking the LRU,
	 * so that in case of an invalidation race concurrent
	 * reclaimers don't waste their time on it.
	 *
	 * If writeback succeeds, or failure is due to the entry
	 * being invalidated by the swap subsystem, the invalidation
	 * will unlink and free it.
	 *
	 * Temporary failures, where the same entry should be tried
	 * again immediately, almost never happen for this shrinker.
	 * We don't do any trylocking; -ENOMEM comes closest,
	 * but that's extremely rare and doesn't happen spuriously
	 * either. Don't bother distinguishing this case.
	 *
	 * But since they do exist in theory, the entry cannot just
	 * be unlinked, or we could leak it. Hence, rotate.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
			*encountered_page_in_swapcache = true;
	} else {
		zswap_written_back_pages++;
	}

	spin_lock(lock);
	return ret;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;
	int nid, shrunk = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -EINVAL;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	pool = zswap_pool_current_get();
	if (!pool)
		return -EINVAL;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	zswap_pool_put(pool);
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	struct mem_cgroup *memcg;
	int ret, failures = 0;

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_pools_lock);
		pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
		memcg = pool->next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_pools_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			pool->next_shrink = NULL;
			spin_unlock(&zswap_pools_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_pools_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;

resched:
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	zswap_alloc_shrinker(pool);
	if (!pool->shrinker)
		goto error;

	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
		goto lru_fail;
	shrinker_register(pool->shrinker);
	INIT_WORK(&pool->shrink_work, shrink_worker);
	atomic_set(&pool->nr_stored, 0);

	zswap_pool_debug("created", pool);

	return pool;

lru_fail:
	list_lru_destroy(&pool->list_lru);
	shrinker_free(pool->shrinker);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	shrinker_free(pool->shrinker);
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	list_lru_destroy(&pool->list_lru);

	spin_lock(&zswap_pools_lock);
	mem_cgroup_iter_break(NULL, pool->next_shrink);
	pool->next_shrink = NULL;
	spin_unlock(&zswap_pools_lock);

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	int ret;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since compression may occasionally expand
	 * the data, and hardware accelerators may not check the dst buffer
	 * size, so give the dst buffer enough length to avoid overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously, making the process
	 * synchronous in fact.
	 * Theoretically, acomp supports users sending multiple acomp requests
	 * in one acomp instance, then getting those requests done
	 * simultaneously. But in this case, zswap actually does store and load
	 * page by page; there is no existing method to send the second page
	 * before the first page is done in one thread doing zswap.
	 * But in different threads running on different cpus, we have
	 * different acomp instances, so multiple threads can do
	 * (de)compression in parallel.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (ret) {
		zswap_reject_compress_fail++;
		goto unlock;
	}

	zpool = zswap_find_zpool(entry);
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto unlock;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto unlock;
	}

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	mutex_unlock(&acomp_ctx->mutex);
	return ret == 0;
}

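/*
 * If the zpool backend can't stay mapped across a sleep, the compressed
 * data is first copied into the per-CPU scratch buffer and unmapped, so
 * the potentially sleeping decompression never runs on a mapped handle.
 */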
static void zswap_decompress(struct zswap_entry *entry, struct page *page)
{
	struct zpool *zpool = zswap_find_zpool(entry);
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	if (!zpool_can_sleep_mapped(zpool)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(zpool))
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct zswap_tree *tree;
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot. Verify that the
	 * swap entry hasn't been invalidated and recycled behind our
	 * backs (our zswap_entry reference doesn't prevent that), to
	 * avoid overwriting a new swap folio with old compressed data.
	 */
	tree = swap_zswap_tree(swpentry);
	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
		spin_unlock(&tree->lock);
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	/* Safe to deref entry after the entry is verified above. */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	zswap_decompress(entry, &folio->page);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	spin_lock(&tree->lock);
	zswap_invalidate_entry(tree, entry);
	zswap_entry_put(entry);
	spin_unlock(&tree->lock);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

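/*
 * Returns 1 if the page consists of a single repeated machine word (e.g. a
 * zero-filled page), storing that word in @value. The first and last words
 * are compared up front as a cheap early reject for typical non-uniform
 * pages.
 */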
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

34f4c198 1540bool zswap_store(struct folio *folio)
2b281117 1541{
3d2c9087 1542 swp_entry_t swp = folio->swap;
42c06a0e 1543 pgoff_t offset = swp_offset(swp);
44c7c734 1544 struct zswap_tree *tree = swap_zswap_tree(swp);
2b281117 1545 struct zswap_entry *entry, *dupentry;
f4840ccf 1546 struct obj_cgroup *objcg = NULL;
a65b0e76 1547 struct mem_cgroup *memcg = NULL;
be7fc97c 1548 struct zswap_pool *shrink_pool;
42c06a0e 1549
34f4c198
MWO
1550 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1551 VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
2b281117 1552
34f4c198
MWO
1553 /* Large folios aren't supported */
1554 if (folio_test_large(folio))
42c06a0e 1555 return false;
7ba71669 1556
ca56489c
DC
1557 /*
1558 * If this is a duplicate, it must be removed before attempting to store
1559 * it, otherwise, if the store fails the old page won't be removed from
1560 * the tree, and it might be written back overriding the new data.
1561 */
1562 spin_lock(&tree->lock);
be7fc97c
JW
1563 entry = zswap_rb_search(&tree->rbroot, offset);
1564 if (entry) {
1565 zswap_invalidate_entry(tree, entry);
ca56489c 1566 zswap_duplicate_entry++;
ca56489c
DC
1567 }
1568 spin_unlock(&tree->lock);
678e54d4
CZ
1569
1570 if (!zswap_enabled)
1571 return false;
1572
074e3e26 1573 objcg = get_obj_cgroup_from_folio(folio);
a65b0e76
DC
1574 if (objcg && !obj_cgroup_may_zswap(objcg)) {
1575 memcg = get_mem_cgroup_from_objcg(objcg);
1576 if (shrink_memcg(memcg)) {
1577 mem_cgroup_put(memcg);
1578 goto reject;
1579 }
1580 mem_cgroup_put(memcg);
1581 }
f4840ccf 1582
2b281117
SJ
1583 /* reclaim space if needed */
1584 if (zswap_is_full()) {
1585 zswap_pool_limit_hit++;
45190f01 1586 zswap_pool_reached_full = true;
f4840ccf 1587 goto shrink;
45190f01 1588 }
16e536ef 1589
45190f01 1590 if (zswap_pool_reached_full) {
42c06a0e 1591 if (!zswap_can_accept())
e0228d59 1592 goto shrink;
42c06a0e 1593 else
45190f01 1594 zswap_pool_reached_full = false;
2b281117
SJ
1595 }
1596
1597 /* allocate entry */
be7fc97c 1598 entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
2b281117
SJ
1599 if (!entry) {
1600 zswap_reject_kmemcache_fail++;
2b281117
SJ
1601 goto reject;
1602 }
1603
a85f878b 1604 if (zswap_same_filled_pages_enabled) {
be7fc97c
JW
1605 unsigned long value;
1606 u8 *src;
1607
1608 src = kmap_local_folio(folio, 0);
a85f878b 1609 if (zswap_is_page_same_filled(src, &value)) {
003ae2fb 1610 kunmap_local(src);
a85f878b
SD
1611 entry->length = 0;
1612 entry->value = value;
1613 atomic_inc(&zswap_same_filled_pages);
1614 goto insert_entry;
1615 }
003ae2fb 1616 kunmap_local(src);
a85f878b
SD
1617 }
1618
42c06a0e 1619 if (!zswap_non_same_filled_pages_enabled)
cb325ddd 1620 goto freepage;
cb325ddd 1621
f1c54846
DS
1622 /* if entry is successfully added, it keeps the reference */
1623 entry->pool = zswap_pool_current_get();
42c06a0e 1624 if (!entry->pool)
f1c54846 1625 goto freepage;
f1c54846 1626
a65b0e76
DC
1627 if (objcg) {
1628 memcg = get_mem_cgroup_from_objcg(objcg);
1629 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1630 mem_cgroup_put(memcg);
1631 goto put_pool;
1632 }
1633 mem_cgroup_put(memcg);
1634 }
1635
fa9ad6e2
JW
1636 if (!zswap_compress(folio, entry))
1637 goto put_pool;
1ec3b5fe 1638
a85f878b 1639insert_entry:
be7fc97c 1640 entry->swpentry = swp;
f4840ccf
JW
1641 entry->objcg = objcg;
1642 if (objcg) {
1643 obj_cgroup_charge_zswap(objcg, entry->length);
1644 /* Account before objcg ref is moved to tree */
1645 count_objcg_event(objcg, ZSWPOUT);
1646 }
1647
2b281117
SJ
1648 /* map */
1649 spin_lock(&tree->lock);
ca56489c
DC
1650 /*
1651 * A duplicate entry should have been removed at the beginning of this
1652 * function. Since the swap entry should be pinned, if a duplicate is
1653 * found again here it means that something went wrong in the swap
1654 * cache.
1655 */
42c06a0e 1656 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
ca56489c 1657 WARN_ON(1);
42c06a0e 1658 zswap_duplicate_entry++;
56c67049 1659 zswap_invalidate_entry(tree, dupentry);
42c06a0e 1660 }
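	/*
	 * Only compressed entries go on the pool's LRU, where writeback
	 * and the shrinker can find them; same-filled entries occupy no
	 * pool memory and are not tracked.
	 */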
35499e2b 1661 if (entry->length) {
a65b0e76
DC
1662 INIT_LIST_HEAD(&entry->lru);
1663 zswap_lru_add(&entry->pool->list_lru, entry);
b5ba474f 1664 atomic_inc(&entry->pool->nr_stored);
f999f38b 1665 }
2b281117
SJ
1666 spin_unlock(&tree->lock);
1667
1668 /* update stats */
1669 atomic_inc(&zswap_stored_pages);
f1c54846 1670 zswap_update_total_size();
f6498b77 1671 count_vm_event(ZSWPOUT);
2b281117 1672
42c06a0e 1673 return true;
2b281117 1674
a65b0e76 1675put_pool:
f1c54846
DS
1676 zswap_pool_put(entry->pool);
1677freepage:
2b281117
SJ
1678 zswap_entry_cache_free(entry);
1679reject:
f4840ccf
JW
1680 if (objcg)
1681 obj_cgroup_put(objcg);
42c06a0e 1682 return false;
f4840ccf
JW
1683
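	/*
	 * The pool limit was hit: kick the asynchronous shrink worker,
	 * which writes entries back until the pool can accept stores
	 * again, and reject the current store.
	 */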
1684shrink:
be7fc97c
JW
1685 shrink_pool = zswap_pool_last_get();
1686 if (shrink_pool && !queue_work(shrink_wq, &shrink_pool->shrink_work))
1687 zswap_pool_put(shrink_pool);
f4840ccf 1688 goto reject;
2b281117
SJ
1689}
1690
ca54f6d8 1691bool zswap_load(struct folio *folio)
2b281117 1692{
3d2c9087 1693 swp_entry_t swp = folio->swap;
42c06a0e 1694 pgoff_t offset = swp_offset(swp);
ca54f6d8 1695 struct page *page = &folio->page;
44c7c734 1696 struct zswap_tree *tree = swap_zswap_tree(swp);
2b281117 1697 struct zswap_entry *entry;
32acba4c 1698 u8 *dst;
42c06a0e 1699
ca54f6d8 1700 VM_WARN_ON_ONCE(!folio_test_locked(folio));
2b281117 1701
2b281117 1702 spin_lock(&tree->lock);
5b297f70 1703 entry = zswap_rb_search(&tree->rbroot, offset);
2b281117 1704 if (!entry) {
2b281117 1705 spin_unlock(&tree->lock);
42c06a0e 1706 return false;
2b281117 1707 }
5b297f70 1708 zswap_entry_get(entry);
2b281117
SJ
1709 spin_unlock(&tree->lock);
1710
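	/*
	 * Same-filled entries (length == 0) are recreated by refilling
	 * the page with the stored value; everything else must be
	 * decompressed.
	 */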
66447fd0 1711 if (entry->length)
ff2972aa 1712 zswap_decompress(entry, page);
66447fd0 1713 else {
003ae2fb 1714 dst = kmap_local_page(page);
a85f878b 1715 zswap_fill_page(dst, entry->value);
003ae2fb 1716 kunmap_local(dst);
a85f878b
SD
1717 }
1718
f6498b77 1719 count_vm_event(ZSWPIN);
f4840ccf
JW
1720 if (entry->objcg)
1721 count_objcg_event(entry->objcg, ZSWPIN);
c75f5c1e 1722
2b281117 1723 spin_lock(&tree->lock);
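	/*
	 * With exclusive loads the entry is freed right away, and the
	 * folio is marked dirty so its contents are written out again if
	 * it is reclaimed later.  Otherwise rotate the entry on the LRU
	 * so it counts as recently used.
	 */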
66447fd0 1724 if (zswap_exclusive_loads_enabled) {
b9c91c43 1725 zswap_invalidate_entry(tree, entry);
ca54f6d8 1726 folio_mark_dirty(folio);
35499e2b 1727 } else if (entry->length) {
a65b0e76
DC
1728 zswap_lru_del(&entry->pool->list_lru, entry);
1729 zswap_lru_add(&entry->pool->list_lru, entry);
b9c91c43 1730 }
db128f5f 1731 zswap_entry_put(entry);
2b281117
SJ
1732 spin_unlock(&tree->lock);
1733
66447fd0 1734 return true;
2b281117
SJ
1735}
1736
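/*
 * Called when a swap slot is freed: drop the zswap entry for that offset,
 * if any, so stale compressed data cannot outlive the slot.
 */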
42c06a0e 1737void zswap_invalidate(int type, pgoff_t offset)
2b281117 1738{
44c7c734 1739 struct zswap_tree *tree = swap_zswap_tree(swp_entry(type, offset));
2b281117 1740 struct zswap_entry *entry;
2b281117
SJ
1741
1742 /* find */
1743 spin_lock(&tree->lock);
1744 entry = zswap_rb_search(&tree->rbroot, offset);
1745 if (!entry) {
1746 /* entry was written back */
1747 spin_unlock(&tree->lock);
1748 return;
1749 }
b9c91c43 1750 zswap_invalidate_entry(tree, entry);
2b281117 1751 spin_unlock(&tree->lock);
2b281117
SJ
1752}
1753
44c7c734 1754int zswap_swapon(int type, unsigned long nr_pages)
42c06a0e 1755{
44c7c734
CZ
1756 struct zswap_tree *trees, *tree;
1757 unsigned int nr, i;
42c06a0e 1758
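	/*
	 * One rb-tree covers each SWAP_ADDRESS_SPACE_PAGES-sized chunk of
	 * the swap device, spreading tree->lock contention across
	 * independent locks.
	 */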
44c7c734
CZ
1759 nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1760 trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1761 if (!trees) {
42c06a0e 1762 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
bb29fd77 1763 return -ENOMEM;
42c06a0e
JW
1764 }
1765
44c7c734
CZ
1766 for (i = 0; i < nr; i++) {
1767 tree = trees + i;
1768 tree->rbroot = RB_ROOT;
1769 spin_lock_init(&tree->lock);
1770 }
1771
1772 nr_zswap_trees[type] = nr;
1773 zswap_trees[type] = trees;
bb29fd77 1774 return 0;
42c06a0e
JW
1775}
1776
1777void zswap_swapoff(int type)
2b281117 1778{
44c7c734
CZ
1779 struct zswap_tree *trees = zswap_trees[type];
1780 unsigned int i;
2b281117 1781
44c7c734 1782 if (!trees)
2b281117
SJ
1783 return;
1784
83e68f25
YA
1785 /* try_to_unuse() invalidated all the entries already */
1786 for (i = 0; i < nr_zswap_trees[type]; i++)
1787 WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
44c7c734
CZ
1788
1789 kvfree(trees);
1790 nr_zswap_trees[type] = 0;
aa9bca05 1791 zswap_trees[type] = NULL;
2b281117
SJ
1792}
1793
2b281117
SJ
1794/*********************************
1795* debugfs functions
1796**********************************/
1797#ifdef CONFIG_DEBUG_FS
1798#include <linux/debugfs.h>
1799
1800static struct dentry *zswap_debugfs_root;
1801
141fdeec 1802static int zswap_debugfs_init(void)
2b281117
SJ
1803{
1804 if (!debugfs_initialized())
1805 return -ENODEV;
1806
1807 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
2b281117 1808
0825a6f9
JP
1809 debugfs_create_u64("pool_limit_hit", 0444,
1810 zswap_debugfs_root, &zswap_pool_limit_hit);
1811 debugfs_create_u64("reject_reclaim_fail", 0444,
1812 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1813 debugfs_create_u64("reject_alloc_fail", 0444,
1814 zswap_debugfs_root, &zswap_reject_alloc_fail);
1815 debugfs_create_u64("reject_kmemcache_fail", 0444,
1816 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
cb61dad8
NP
1817 debugfs_create_u64("reject_compress_fail", 0444,
1818 zswap_debugfs_root, &zswap_reject_compress_fail);
0825a6f9
JP
1819 debugfs_create_u64("reject_compress_poor", 0444,
1820 zswap_debugfs_root, &zswap_reject_compress_poor);
1821 debugfs_create_u64("written_back_pages", 0444,
1822 zswap_debugfs_root, &zswap_written_back_pages);
1823 debugfs_create_u64("duplicate_entry", 0444,
1824 zswap_debugfs_root, &zswap_duplicate_entry);
1825 debugfs_create_u64("pool_total_size", 0444,
1826 zswap_debugfs_root, &zswap_pool_total_size);
1827 debugfs_create_atomic_t("stored_pages", 0444,
1828 zswap_debugfs_root, &zswap_stored_pages);
a85f878b 1829 debugfs_create_atomic_t("same_filled_pages", 0444,
0825a6f9 1830 zswap_debugfs_root, &zswap_same_filled_pages);
2b281117
SJ
1831
1832 return 0;
1833}
2b281117 1834#else
141fdeec 1835static int zswap_debugfs_init(void)
2b281117
SJ
1836{
1837 return 0;
1838}
2b281117
SJ
1839#endif
1840
1841/*********************************
1842* module init and exit
1843**********************************/
141fdeec 1844static int zswap_setup(void)
2b281117 1845{
f1c54846 1846 struct zswap_pool *pool;
ad7ed770 1847 int ret;
60105e12 1848
b7919122
LS
1849 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1850 if (!zswap_entry_cache) {
2b281117 1851 pr_err("entry cache creation failed\n");
f1c54846 1852 goto cache_fail;
2b281117 1853 }
f1c54846 1854
cab7a7e5
SAS
1855 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1856 "mm/zswap_pool:prepare",
1857 zswap_cpu_comp_prepare,
1858 zswap_cpu_comp_dead);
1859 if (ret)
1860 goto hp_fail;
1861
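	/*
	 * Create a pool from the configured compressor and zpool, falling
	 * back to the compile-time defaults if those are unavailable.
	 */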
f1c54846 1862 pool = __zswap_pool_create_fallback();
ae3d89a7
DS
1863 if (pool) {
1864 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
b8cf32dc 1865 zpool_get_type(pool->zpools[0]));
ae3d89a7
DS
1866 list_add(&pool->list, &zswap_pools);
1867 zswap_has_pool = true;
1868 } else {
f1c54846 1869 pr_err("pool creation failed\n");
ae3d89a7 1870 zswap_enabled = false;
2b281117 1871 }
60105e12 1872
8409a385
RM
1873 shrink_wq = alloc_workqueue("zswap-shrink",
1874 WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
45190f01
VW
1875 if (!shrink_wq)
1876 goto fallback_fail;
1877
2b281117
SJ
1878 if (zswap_debugfs_init())
1879 pr_warn("debugfs initialization failed\n");
9021ccec 1880 zswap_init_state = ZSWAP_INIT_SUCCEED;
2b281117 1881 return 0;
f1c54846 1882
45190f01 1883fallback_fail:
38aeb071
DC
1884 if (pool)
1885 zswap_pool_destroy(pool);
cab7a7e5 1886hp_fail:
b7919122 1887 kmem_cache_destroy(zswap_entry_cache);
f1c54846 1888cache_fail:
d7b028f5 1889 /* if built-in, we aren't unloaded on failure; don't allow use */
9021ccec 1890 zswap_init_state = ZSWAP_INIT_FAILED;
d7b028f5 1891 zswap_enabled = false;
2b281117
SJ
1892 return -ENOMEM;
1893}
141fdeec
LS
1894
1895static int __init zswap_init(void)
1896{
1897 if (!zswap_enabled)
1898 return 0;
1899 return zswap_setup();
1900}
2b281117 1901/* must be late so crypto has time to come up */
141fdeec 1902late_initcall(zswap_init);
2b281117 1903
68386da8 1904MODULE_AUTHOR("Seth Jennings <[email protected]>");
2b281117 1905MODULE_DESCRIPTION("Compressed cache for swap pages");