// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <[email protected]>
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *dstmem;
	struct mutex *mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_lru list_lru;
	struct mem_cgroup *next_shrink;
	struct shrinker *shrinker;
	atomic_t nr_stored;
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback. The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount. Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page, length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	struct rb_node rbnode;
	swp_entry_t swpentry;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

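/*
 * The two checks below implement the pool-limit hysteresis: zswap_is_full()
 * trips once the pool exceeds zswap_max_pool_percent of RAM, and stores are
 * then refused until usage falls back under zswap_accept_thr_percent of that
 * cap. E.g. with the default 20%/90% settings on a 10 GiB machine, stores
 * stop above 2 GiB of pool size and resume below 1.8 GiB.
 */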
static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static u64 get_zswap_pool_size(struct zswap_pool *pool)
{
	u64 pool_size = 0;
	int i;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		pool_size += zpool_get_total_size(pool->zpools[i]);

	return pool_size;
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += get_zswap_pool_size(pool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

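/*
 * Memcg offlining callback: if any pool's shrink-worker iterator is parked on
 * the memcg going offline, advance it past the dying cgroup so the worker
 * never keeps a round-robin reference to a dead memcg.
 */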
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;

	/* lock out zswap pools list modification */
	spin_lock(&zswap_pools_lock);
	list_for_each_entry(pool, &zswap_pools, list) {
		if (pool->next_shrink == memcg)
			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
	}
	spin_unlock(&zswap_pools_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* zswap lruvec functions
**********************************/
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_page_swapin(struct page *page)
{
	struct lruvec *lruvec;

	if (page) {
		lruvec = folio_lruvec(page_folio(page));
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

/*********************************
* lru functions
**********************************/
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del() and list_lru_putback().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
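	/*
	 * Concretely: whenever the protected count climbs past a quarter of
	 * the LRU size it is halved, so protection granted by an old burst
	 * of swapins decays instead of pinning the whole LRU indefinitely.
	 */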
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_putback(struct list_lru *list_lru,
		struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	spinlock_t *lock = &list_lru->node[nid].lock;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	spin_lock(lock);
	/* we cannot use list_lru_add here, because it increments node's lru count */
	list_lru_putback(list_lru, &entry->lru, nid, memcg);
	spin_unlock(lock);

	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
	/* increment the protection area to account for the LRU rotation. */
	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	rcu_read_unlock();
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}

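/*
 * An entry is mapped to one of its pool's ZSWAP_NR_ZPOOLS zpools by hashing
 * the entry pointer. This spreads concurrent stores over several zpools to
 * reduce lock contention, and the mapping stays fixed for the entry's
 * lifetime since the pointer does not change.
 */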
static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	int i = 0;

	if (ZSWAP_NR_ZPOOLS > 1)
		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

	return entry->pool->zpools[i];
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&entry->pool->list_lru, entry);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		atomic_dec(&entry->pool->nr_stored);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	WARN_ON_ONCE(refcount < 0);
	if (refcount == 0) {
		WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

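/*
 * The shrinker below walks the per-memcg zswap LRUs under memory pressure.
 * nr_zswap_protected, incremented on recent swapins by zswap_page_swapin(),
 * estimates how much of the warm end of each LRU should be shielded from
 * writeback to the backing swap device.
 */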
/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg);

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	struct zswap_pool *pool = shrinker->private_data;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&pool->list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at
	 * the same time (before any of them made reasonable progress), the
	 * total number of reclaimed objects might be more than the number of
	 * unprotected objects (i.e. the reclaimers will reclaim into the
	 * protected area of the zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct zswap_pool *pool = shrinker->private_data;
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled)
		return 0;

#ifdef CONFIG_MEMCG_KMEM
	mem_cgroup_flush_stats(memcg);
	nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
#else
	/* use pool stats instead of memcg stats */
	nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
	nr_stored = atomic_read(&pool->nr_stored);
#endif

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
	/*
	 * Subtract from the lru size an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
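	/*
	 * E.g. at a 3:1 compression ratio, nr_backing is roughly a third of
	 * nr_stored, so only about a third of the freeable objects get
	 * reported to the shrinker core.
	 */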
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static void zswap_alloc_shrinker(struct zswap_pool *pool)
{
	pool->shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!pool->shrinker)
		return;

	pool->shrinker->private_data = pool;
	pool->shrinker->scan_objects = zswap_shrinker_scan;
	pool->shrinker->count_objects = zswap_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->seeks = DEFAULT_SEEKS;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
/*
 * If users dynamically change the zpool type and compressor at runtime, i.e.
 * zswap is running, zswap can have more than one zpool on one cpu, but they
 * are sharing dstmem. So we need this mutex to be per-cpu.
 */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
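
/*
 * The per-cpu dstmem buffer is 2 * PAGE_SIZE so a compressor whose output
 * can exceed the input (incompressible data) still has room for its
 * worst-case result; the output scatterlist in zswap_store() is sized to
 * match.
 */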
static int zswap_dstmem_prepare(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
	if (!mutex) {
		kfree(dst);
		return -ENOMEM;
	}

	mutex_init(mutex);
	per_cpu(zswap_dstmem, cpu) = dst;
	per_cpu(zswap_mutex, cpu) = mutex;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	mutex = per_cpu(zswap_mutex, cpu);
	kfree(mutex);
	per_cpu(zswap_mutex, cpu) = NULL;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		return PTR_ERR(acomp);
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		crypto_free_acomp(acomp_ctx->acomp);
		return -ENOMEM;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);

	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
	}

	return 0;
}

/*********************************
* pool functions
**********************************/

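/*
 * zswap_pools is ordered: the head of the list is the current pool, the one
 * new stores go to. Pools replaced via the compressor/zpool params stay on
 * the list (and are picked up by zswap_pool_last_get() for shrinking) until
 * their last stored entry is freed and the kref drops to zero.
 */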
static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(tree, entry);
}

static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	struct zswap_tree *tree;
	pgoff_t swpoffset;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpoffset is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpoffset = swp_offset(entry->swpentry);
	tree = zswap_trees[swp_type(entry->swpentry)];
	list_lru_isolate(l, item);
	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	/* Check for invalidate() race */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
		goto unlock;

	/* Hold a reference to prevent a free during writeback */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	writeback_result = zswap_writeback_entry(entry, tree);

	spin_lock(&tree->lock);
	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		zswap_lru_putback(&entry->pool->list_lru, entry);
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_SKIP;
			*encountered_page_in_swapcache = true;
		}

		goto put_unlock;
	}
	zswap_written_back_pages++;

	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	count_vm_event(ZSWPWB);
	/*
	 * Writeback started successfully, the page now belongs to the
	 * swapcache. Drop the entry from zswap - unless invalidate already
	 * took it out while we had the tree->lock released for IO.
	 */
	zswap_invalidate_entry(tree, entry);

put_unlock:
	/* Drop local reference */
	zswap_entry_put(tree, entry);
unlock:
	spin_unlock(&tree->lock);
	spin_lock(lock);
	return ret;
}

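/*
 * Write back one LRU entry for @memcg (or for global reclaim when NULL) from
 * each NUMA node. Returns 0 if at least one entry was written back, -EAGAIN
 * if none were, -ENOENT for an offline memcg, and -EINVAL when there is no
 * current pool.
 */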
static int shrink_memcg(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;
	int nid, shrunk = 0;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	pool = zswap_pool_current_get();
	if (!pool)
		return -EINVAL;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	zswap_pool_put(pool);
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	struct mem_cgroup *memcg;
	int ret, failures = 0;

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_pools_lock);
		pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
		memcg = pool->next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_pools_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			pool->next_shrink = NULL;
			spin_unlock(&zswap_pools_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_pools_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;

resched:
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	zswap_alloc_shrinker(pool);
	if (!pool->shrinker)
		goto error;

	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
		goto lru_fail;
	shrinker_register(pool->shrinker);
	INIT_WORK(&pool->shrink_work, shrink_worker);
	atomic_set(&pool->nr_stored, 0);

	zswap_pool_debug("created", pool);

	return pool;

lru_fail:
	list_lru_destroy(&pool->list_lru);
	shrinker_free(pool->shrinker);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

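/*
 * Create the initial pool from the configured parameters, falling back to
 * the compile-time defaults when the configured compressor or zpool is
 * unavailable, and marking the corresponding param unset when even the
 * default cannot be used.
 */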
static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	shrinker_free(pool->shrinker);
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	list_lru_destroy(&pool->list_lru);

	spin_lock(&zswap_pools_lock);
	mem_cgroup_iter_break(NULL, pool->next_shrink);
	pool->next_shrink = NULL;
	spin_unlock(&zswap_pools_lock);

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree)
{
	swp_entry_t swpentry = entry->swpentry;
	struct page *page;
	struct mempolicy *mpol;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct zpool *pool = zswap_find_zpool(entry);
	bool page_was_allocated;
	u8 *src, *tmp = NULL;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

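	/*
	 * A zpool that cannot sleep while a handle is mapped (e.g. zsmalloc)
	 * needs a bounce buffer: the compressed data is copied out of the
	 * mapping below and decompressed from the copy.
	 */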
	if (!zpool_can_sleep_mapped(pool)) {
		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}

	/* try to allocate swap cache page */
	mpol = get_task_policy(current);
	page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &page_was_allocated, true);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Found an existing page, we raced with load/swapin */
	if (!page_was_allocated) {
		put_page(page);
		ret = -EEXIST;
		goto fail;
	}

	/*
	 * Page is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot. Verify that the
	 * swap entry hasn't been invalidated and recycled behind our
	 * backs (our zswap_entry reference doesn't prevent that), to
	 * avoid overwriting a new swap page with old compressed data.
	 */
	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
		spin_unlock(&tree->lock);
		delete_from_swap_cache(page_folio(page));
		ret = -ENOMEM;
		goto fail;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	dlen = PAGE_SIZE;

	src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
	if (!zpool_can_sleep_mapped(pool)) {
		memcpy(tmp, src, entry->length);
		src = tmp;
		zpool_unmap_handle(pool, entry->handle);
	}

	mutex_lock(acomp_ctx->mutex);
	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	mutex_unlock(acomp_ctx->mutex);

	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);
	else
		zpool_unmap_handle(pool, entry->handle);

	BUG_ON(ret);
	BUG_ON(dlen != PAGE_SIZE);

	/* page is up to date */
	SetPageUptodate(page);

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc);
	put_page(page);

	return ret;

fail:
	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);

	/*
	 * If we get here because the page is already in swapcache, a
	 * load may be happening concurrently. It is safe and okay to
	 * not free the entry. It is also okay to return !0.
	 */
	return ret;
}

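/*
 * Same-filled handling: a page whose every word equals its first word (a
 * zero-filled page being the common case) is stored as just that word in
 * entry->value with length 0, never touching the compressor or a zpool;
 * zswap_fill_page() reconstitutes such a page on load.
 */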
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

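/*
 * Store the page backing @folio in zswap. Returns true if the page was
 * stored (same-filled or compressed into a zpool); false sends the caller
 * down the regular swap-out path.
 */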
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	int type = swp_type(swp);
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	struct zswap_pool *pool;
	struct zpool *zpool;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	gfp_t gfp;
	int ret;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled || !tree)
		return false;

	/*
	 * If this is a duplicate, it must be removed before attempting to store
	 * it, otherwise, if the store fails the old page won't be removed from
	 * the tree, and it might be written back overriding the new data.
	 */
	spin_lock(&tree->lock);
	dupentry = zswap_rb_search(&tree->rbroot, offset);
	if (dupentry) {
		zswap_duplicate_entry++;
		zswap_invalidate_entry(tree, dupentry);
	}
	spin_unlock(&tree->lock);
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		goto shrink;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept())
			goto shrink;
		else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_local_page(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_local(src);
			entry->swpentry = swp_entry(type, offset);
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_local(src);
	}

	if (!zswap_non_same_filled_pages_enabled)
		goto freepage;

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	/* compress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(acomp_ctx->mutex);

	dst = acomp_ctx->dstmem;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
	/*
	 * It may look a little odd that we send an asynchronous request and
	 * then wait for its completion synchronously, making the process
	 * synchronous in effect.
	 * Theoretically, acomp supports users sending multiple acomp requests
	 * in one acomp instance, then getting those requests done
	 * simultaneously. But in this case, zswap actually does store and
	 * load page by page: there is no existing method to send the second
	 * page before the first page is done in one thread doing zswap.
	 * But in different threads running on different cpus, we have
	 * different acomp instances, so multiple threads can do
	 * (de)compression in parallel.
	 */
1666 ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1667 dlen = acomp_ctx->req->dlen;
1668
cb61dad8
NP
1669 if (ret) {
1670 zswap_reject_compress_fail++;
f1c54846 1671 goto put_dstmem;
cb61dad8 1672 }
2b281117
SJ
1673
1674 /* store */
b8cf32dc 1675 zpool = zswap_find_zpool(entry);
d2fcd82b 1676 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
b8cf32dc 1677 if (zpool_malloc_support_movable(zpool))
d2fcd82b 1678 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
b8cf32dc 1679 ret = zpool_malloc(zpool, dlen, gfp, &handle);
2b281117
SJ
1680 if (ret == -ENOSPC) {
1681 zswap_reject_compress_poor++;
f1c54846 1682 goto put_dstmem;
2b281117
SJ
1683 }
1684 if (ret) {
1685 zswap_reject_alloc_fail++;
f1c54846 1686 goto put_dstmem;
2b281117 1687 }
b8cf32dc 1688 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
0bb48849 1689 memcpy(buf, dst, dlen);
b8cf32dc 1690 zpool_unmap_handle(zpool, handle);
1ec3b5fe 1691 mutex_unlock(acomp_ctx->mutex);
2b281117
SJ
1692
1693 /* populate entry */
0bb48849 1694 entry->swpentry = swp_entry(type, offset);
2b281117
SJ
1695 entry->handle = handle;
1696 entry->length = dlen;
1697
a85f878b 1698insert_entry:
f4840ccf
JW
1699 entry->objcg = objcg;
1700 if (objcg) {
1701 obj_cgroup_charge_zswap(objcg, entry->length);
1702 /* Account before objcg ref is moved to tree */
1703 count_objcg_event(objcg, ZSWPOUT);
1704 }
1705
2b281117
SJ
1706 /* map */
1707 spin_lock(&tree->lock);
ca56489c
DC
1708 /*
1709 * A duplicate entry should have been removed at the beginning of this
1710 * function. Since the swap entry should be pinned, if a duplicate is
1711 * found again here it means that something went wrong in the swap
1712 * cache.
1713 */
42c06a0e 1714 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
ca56489c 1715 WARN_ON(1);
42c06a0e 1716 zswap_duplicate_entry++;
56c67049 1717 zswap_invalidate_entry(tree, dupentry);
42c06a0e 1718 }
35499e2b 1719 if (entry->length) {
a65b0e76
DC
1720 INIT_LIST_HEAD(&entry->lru);
1721 zswap_lru_add(&entry->pool->list_lru, entry);
b5ba474f 1722 atomic_inc(&entry->pool->nr_stored);
f999f38b 1723 }
2b281117
SJ
1724 spin_unlock(&tree->lock);
1725
1726 /* update stats */
1727 atomic_inc(&zswap_stored_pages);
f1c54846 1728 zswap_update_total_size();
f6498b77 1729 count_vm_event(ZSWPOUT);
2b281117 1730
42c06a0e 1731 return true;
2b281117 1732
f1c54846 1733put_dstmem:
1ec3b5fe 1734 mutex_unlock(acomp_ctx->mutex);
a65b0e76 1735put_pool:
f1c54846
DS
1736 zswap_pool_put(entry->pool);
1737freepage:
2b281117
SJ
1738 zswap_entry_cache_free(entry);
1739reject:
f4840ccf
JW
1740 if (objcg)
1741 obj_cgroup_put(objcg);
42c06a0e 1742 return false;
f4840ccf
JW
1743
1744shrink:
1745 pool = zswap_pool_last_get();
969d63e1
JW
1746 if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1747 zswap_pool_put(pool);
f4840ccf 1748 goto reject;
2b281117
SJ
1749}
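/*
 * Caller-side sketch, hedged and loosely based on swap_writepage() in
 * mm/page_io.c: the writeout path tries zswap first and only issues
 * real swap I/O when zswap_store() returns false:
 *
 *	if (zswap_store(folio)) {
 *		folio_start_writeback(folio);
 *		folio_unlock(folio);
 *		folio_end_writeback(folio);
 *		return 0;
 *	}
 *	__swap_writepage(&folio->page, wbc);
 */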
1750
ca54f6d8 1751bool zswap_load(struct folio *folio)
2b281117 1752{
3d2c9087 1753 swp_entry_t swp = folio->swap;
42c06a0e
JW
1754 int type = swp_type(swp);
1755 pgoff_t offset = swp_offset(swp);
ca54f6d8 1756 struct page *page = &folio->page;
2b281117
SJ
1757 struct zswap_tree *tree = zswap_trees[type];
1758 struct zswap_entry *entry;
1ec3b5fe
BS
1759 struct scatterlist input, output;
1760 struct crypto_acomp_ctx *acomp_ctx;
fc6697a8 1761 u8 *src, *dst, *tmp;
b8cf32dc 1762 struct zpool *zpool;
2b281117 1763 unsigned int dlen;
42c06a0e
JW
1764 bool ret;
1765
ca54f6d8 1766 VM_WARN_ON_ONCE(!folio_test_locked(folio));
2b281117
SJ
1767
1768 /* find */
1769 spin_lock(&tree->lock);
0ab0abcf 1770 entry = zswap_entry_find_get(&tree->rbroot, offset);
2b281117 1771 if (!entry) {
2b281117 1772 spin_unlock(&tree->lock);
42c06a0e 1773 return false;
2b281117 1774 }
2b281117
SJ
1775 spin_unlock(&tree->lock);
1776
a85f878b 1777 if (!entry->length) {
003ae2fb 1778 dst = kmap_local_page(page);
a85f878b 1779 zswap_fill_page(dst, entry->value);
003ae2fb 1780 kunmap_local(dst);
42c06a0e 1781 ret = true;
f6498b77 1782 goto stats;
a85f878b
SD
1783 }
1784
b8cf32dc
YA
1785 zpool = zswap_find_zpool(entry);
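/*
 * Some zpool backends cannot stay mapped across a sleep, and both the
 * acomp mutex and the decompression wait below may sleep; for those
 * backends the compressed data is copied into a temporary buffer first
 * so the handle can be unmapped early.
 */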
1786 if (!zpool_can_sleep_mapped(zpool)) {
8d9b6370 1787 tmp = kmalloc(entry->length, GFP_KERNEL);
fc6697a8 1788 if (!tmp) {
42c06a0e 1789 ret = false;
fc6697a8
TT
1790 goto freeentry;
1791 }
1792 }
1793
2b281117
SJ
1794 /* decompress */
1795 dlen = PAGE_SIZE;
b8cf32dc 1796 src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1ec3b5fe 1797
b8cf32dc 1798 if (!zpool_can_sleep_mapped(zpool)) {
fc6697a8
TT
1799 memcpy(tmp, src, entry->length);
1800 src = tmp;
b8cf32dc 1801 zpool_unmap_handle(zpool, entry->handle);
fc6697a8
TT
1802 }
1803
1ec3b5fe
BS
1804 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1805 mutex_lock(acomp_ctx->mutex);
1806 sg_init_one(&input, src, entry->length);
1807 sg_init_table(&output, 1);
1808 sg_set_page(&output, page, PAGE_SIZE, 0);
1809 acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
42c06a0e
JW
1810 if (crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait))
1811 WARN_ON(1);
1ec3b5fe
BS
1812 mutex_unlock(acomp_ctx->mutex);
1813
b8cf32dc
YA
1814 if (zpool_can_sleep_mapped(zpool))
1815 zpool_unmap_handle(zpool, entry->handle);
fc6697a8
TT
1816 else
1817 kfree(tmp);
1818
42c06a0e 1819 ret = true;
f6498b77
JW
1820stats:
1821 count_vm_event(ZSWPIN);
f4840ccf
JW
1822 if (entry->objcg)
1823 count_objcg_event(entry->objcg, ZSWPIN);
a85f878b 1824freeentry:
2b281117 1825 spin_lock(&tree->lock);
42c06a0e 1826 if (ret && zswap_exclusive_loads_enabled) {
b9c91c43 1827 zswap_invalidate_entry(tree, entry);
ca54f6d8 1828 folio_mark_dirty(folio);
35499e2b 1829 } else if (entry->length) {
a65b0e76
DC
1830 zswap_lru_del(&entry->pool->list_lru, entry);
1831 zswap_lru_add(&entry->pool->list_lru, entry);
b9c91c43 1832 }
18a93707 1833 zswap_entry_put(tree, entry);
2b281117
SJ
1834 spin_unlock(&tree->lock);
1835
fc6697a8 1836 return ret;
2b281117
SJ
1837}
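/*
 * Caller-side sketch, hedged and loosely based on the swap-in path in
 * mm/page_io.c: zswap is consulted before any device I/O is issued:
 *
 *	if (zswap_load(folio)) {
 *		folio_mark_uptodate(folio);
 *		folio_unlock(folio);
 *		return;
 *	}
 *	... submit the read bio to the swap device ...
 */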
1838
42c06a0e 1839void zswap_invalidate(int type, pgoff_t offset)
2b281117
SJ
1840{
1841 struct zswap_tree *tree = zswap_trees[type];
1842 struct zswap_entry *entry;
2b281117
SJ
1843
1844 /* find */
1845 spin_lock(&tree->lock);
1846 entry = zswap_rb_search(&tree->rbroot, offset);
1847 if (!entry) {
1848 /* entry was written back */
1849 spin_unlock(&tree->lock);
1850 return;
1851 }
b9c91c43 1852 zswap_invalidate_entry(tree, entry);
2b281117 1853 spin_unlock(&tree->lock);
2b281117
SJ
1854}
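/*
 * Usage sketch, hedged and loosely based on swap_range_free() in
 * mm/swapfile.c: the slot-freeing path invalidates each slot it
 * releases:
 *
 *	for (i = 0; i < nr_entries; i++)
 *		zswap_invalidate(si->type, offset + i);
 */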
1855
42c06a0e
JW
1856void zswap_swapon(int type)
1857{
1858 struct zswap_tree *tree;
1859
1860 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1861 if (!tree) {
1862 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1863 return;
1864 }
1865
1866 tree->rbroot = RB_ROOT;
1867 spin_lock_init(&tree->lock);
1868 zswap_trees[type] = tree;
1869}
1870
1871void zswap_swapoff(int type)
2b281117
SJ
1872{
1873 struct zswap_tree *tree = zswap_trees[type];
0bd42136 1874 struct zswap_entry *entry, *n;
2b281117
SJ
1875
1876 if (!tree)
1877 return;
1878
1879 /* walk the tree and free everything */
1880 spin_lock(&tree->lock);
0ab0abcf 1881 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
60105e12 1882 zswap_free_entry(entry);
2b281117
SJ
1883 tree->rbroot = RB_ROOT;
1884 spin_unlock(&tree->lock);
aa9bca05
WY
1885 kfree(tree);
1886 zswap_trees[type] = NULL;
2b281117
SJ
1887}
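/*
 * Lifecycle note (hedged, based on mm/swapfile.c): zswap_swapon() runs
 * while a swap device is being enabled, before any of its slots can be
 * used, and zswap_swapoff() only after swapoff has freed every swap
 * entry on the device, which is why the walk above may free all
 * remaining entries unconditionally.
 */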
1888
2b281117
SJ
1889/*********************************
1890* debugfs functions
1891**********************************/
1892#ifdef CONFIG_DEBUG_FS
1893#include <linux/debugfs.h>
1894
1895static struct dentry *zswap_debugfs_root;
1896
141fdeec 1897static int zswap_debugfs_init(void)
2b281117
SJ
1898{
1899 if (!debugfs_initialized())
1900 return -ENODEV;
1901
1902 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
2b281117 1903
0825a6f9
JP
1904 debugfs_create_u64("pool_limit_hit", 0444,
1905 zswap_debugfs_root, &zswap_pool_limit_hit);
1906 debugfs_create_u64("reject_reclaim_fail", 0444,
1907 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1908 debugfs_create_u64("reject_alloc_fail", 0444,
1909 zswap_debugfs_root, &zswap_reject_alloc_fail);
1910 debugfs_create_u64("reject_kmemcache_fail", 0444,
1911 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
cb61dad8
NP
1912 debugfs_create_u64("reject_compress_fail", 0444,
1913 zswap_debugfs_root, &zswap_reject_compress_fail);
0825a6f9
JP
1914 debugfs_create_u64("reject_compress_poor", 0444,
1915 zswap_debugfs_root, &zswap_reject_compress_poor);
1916 debugfs_create_u64("written_back_pages", 0444,
1917 zswap_debugfs_root, &zswap_written_back_pages);
1918 debugfs_create_u64("duplicate_entry", 0444,
1919 zswap_debugfs_root, &zswap_duplicate_entry);
1920 debugfs_create_u64("pool_total_size", 0444,
1921 zswap_debugfs_root, &zswap_pool_total_size);
1922 debugfs_create_atomic_t("stored_pages", 0444,
1923 zswap_debugfs_root, &zswap_stored_pages);
a85f878b 1924 debugfs_create_atomic_t("same_filled_pages", 0444,
0825a6f9 1925 zswap_debugfs_root, &zswap_same_filled_pages);
2b281117
SJ
1926
1927 return 0;
1928}
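/*
 * Usage note, assuming debugfs is mounted at the conventional path: the
 * counters registered above appear as read-only files under
 * /sys/kernel/debug/zswap/, e.g. pool_total_size reports the compressed
 * pool's size in bytes.
 */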
2b281117 1929#else
141fdeec 1930static int zswap_debugfs_init(void)
2b281117
SJ
1931{
1932 return 0;
1933}
2b281117
SJ
1934#endif
1935
1936/*********************************
1937* module init and exit
1938**********************************/
141fdeec 1939static int zswap_setup(void)
2b281117 1940{
f1c54846 1941 struct zswap_pool *pool;
ad7ed770 1942 int ret;
60105e12 1943
b7919122
LS
1944 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1945 if (!zswap_entry_cache) {
2b281117 1946 pr_err("entry cache creation failed\n");
f1c54846 1947 goto cache_fail;
2b281117 1948 }
f1c54846 1949
ad7ed770
SAS
1950 ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
1951 zswap_dstmem_prepare, zswap_dstmem_dead);
1952 if (ret) {
f1c54846
DS
1953 pr_err("dstmem alloc failed\n");
1954 goto dstmem_fail;
2b281117 1955 }
f1c54846 1956
cab7a7e5
SAS
1957 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1958 "mm/zswap_pool:prepare",
1959 zswap_cpu_comp_prepare,
1960 zswap_cpu_comp_dead);
1961 if (ret)
1962 goto hp_fail;
1963
f1c54846 1964 pool = __zswap_pool_create_fallback();
ae3d89a7
DS
1965 if (pool) {
1966 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
b8cf32dc 1967 zpool_get_type(pool->zpools[0]));
ae3d89a7
DS
1968 list_add(&pool->list, &zswap_pools);
1969 zswap_has_pool = true;
1970 } else {
f1c54846 1971 pr_err("pool creation failed\n");
ae3d89a7 1972 zswap_enabled = false;
2b281117 1973 }
60105e12 1974
45190f01
VW
1975 shrink_wq = create_workqueue("zswap-shrink");
1976 if (!shrink_wq)
1977 goto fallback_fail;
1978
2b281117
SJ
1979 if (zswap_debugfs_init())
1980 pr_warn("debugfs initialization failed\n");
9021ccec 1981 zswap_init_state = ZSWAP_INIT_SUCCEED;
2b281117 1982 return 0;
f1c54846 1983
45190f01 1984fallback_fail:
38aeb071
DC
1985 if (pool)
1986 zswap_pool_destroy(pool);
cab7a7e5 1987hp_fail:
ad7ed770 1988 cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
f1c54846 1989dstmem_fail:
b7919122 1990 kmem_cache_destroy(zswap_entry_cache);
f1c54846 1991cache_fail:
d7b028f5 1992 /* if built-in, we aren't unloaded on failure; don't allow use */
9021ccec 1993 zswap_init_state = ZSWAP_INIT_FAILED;
d7b028f5 1994 zswap_enabled = false;
2b281117
SJ
1995 return -ENOMEM;
1996}
141fdeec
LS
1997
1998static int __init zswap_init(void)
1999{
2000 if (!zswap_enabled)
2001 return 0;
2002 return zswap_setup();
2003}
2b281117 2004/* must be late so crypto has time to come up */
141fdeec 2005late_initcall(zswap_init);
2b281117 2006
68386da8 2007MODULE_AUTHOR("Seth Jennings <[email protected]>");
2b281117 2008MODULE_DESCRIPTION("Compressed cache for swap pages");