// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
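
/*
 * Usage sketch (not part of the source; relies on the standard module
 * parameter sysfs interface): zswap can be enabled at boot with
 * "zswap.enabled=1" on the kernel command line, or at runtime with
 *
 *	echo Y > /sys/module/zswap/parameters/enabled
 */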

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);

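/*
 * Tunables usage sketch (not part of the source; paths follow the standard
 * module-parameter sysfs layout, and valid values depend on the kernel
 * config):
 *
 *	echo lz4    > /sys/module/zswap/parameters/compressor
 *	echo z3fold > /sys/module/zswap/parameters/zpool
 *	echo 25     > /sys/module/zswap/parameters/max_pool_percent
 */
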
/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *dstmem;
	struct mutex *mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_head lru;
	spinlock_t lru_lock;
};

176/*
177 * struct zswap_entry
178 *
179 * This structure contains the metadata for tracking a single compressed
180 * page within zswap.
181 *
182 * rbnode - links the entry into red-black tree for the appropriate swap type
f1c54846 183 * offset - the swap offset for the entry. Index into the red-black tree.
2b281117
SJ
184 * refcount - the number of outstanding reference to the entry. This is needed
185 * to protect against premature freeing of the entry by code
6b452516 186 * concurrent calls to load, invalidate, and writeback. The lock
2b281117
SJ
187 * for the zswap_tree structure that contains the entry must
188 * be held while changing the refcount. Since the lock must
189 * be held, there is no reason to also make refcount atomic.
2b281117 190 * length - the length in bytes of the compressed page data. Needed during
f999f38b
DC
191 * decompression. For a same value filled page length is 0, and both
192 * pool and lru are invalid and must be ignored.
f1c54846
DS
193 * pool - the zswap_pool the entry's data is in
194 * handle - zpool allocation handle that stores the compressed page data
a85f878b 195 * value - value of the same-value filled pages which have same content
f999f38b 196 * lru - handle to the pool's lru used to evict pages.
2b281117
SJ
197 */
198struct zswap_entry {
199 struct rb_node rbnode;
0bb48849 200 swp_entry_t swpentry;
2b281117
SJ
201 int refcount;
202 unsigned int length;
f1c54846 203 struct zswap_pool *pool;
a85f878b
SD
204 union {
205 unsigned long handle;
206 unsigned long value;
207 };
f4840ccf 208 struct obj_cgroup *objcg;
f999f38b 209 struct list_head lru;
2b281117
SJ
210};
211
/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
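
/*
 * Worked example (illustrative numbers, not from the source): with 16 GiB
 * of RAM and the defaults above, zswap_is_full() triggers once the pool
 * exceeds 20% of RAM, i.e. ~3.2 GiB. New stores are then refused until the
 * pool shrinks below accept_threshold_percent of that limit:
 * 90% * 20% * 16 GiB ~= 2.88 GiB.
 */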

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		spin_lock(&entry->pool->lru_lock);
		list_del(&entry->lru);
		spin_unlock(&entry->pool->lru_lock);
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
/*
 * If users dynamically change the zpool type and compressor at runtime, i.e.
 * zswap is running, zswap can have more than one zpool on one cpu, but they
 * are sharing dstmem. So we need this mutex to be per-cpu.
 */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);

static int zswap_dstmem_prepare(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
	if (!mutex) {
		kfree(dst);
		return -ENOMEM;
	}

	mutex_init(mutex);
	per_cpu(zswap_dstmem, cpu) = dst;
	per_cpu(zswap_mutex, cpu) = mutex;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	mutex = per_cpu(zswap_mutex, cpu);
	kfree(mutex);
	per_cpu(zswap_mutex, cpu) = NULL;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		return PTR_ERR(acomp);
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		crypto_free_acomp(acomp_ctx->acomp);
		return -ENOMEM;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);

	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
	}

	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(tree, entry);
}

static int zswap_reclaim_entry(struct zswap_pool *pool)
{
	struct zswap_entry *entry;
	struct zswap_tree *tree;
	pgoff_t swpoffset;
	int ret;

	/* Get an entry off the LRU */
	spin_lock(&pool->lru_lock);
	if (list_empty(&pool->lru)) {
		spin_unlock(&pool->lru_lock);
		return -EINVAL;
	}
	entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
	list_del_init(&entry->lru);
	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpoffset is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpoffset = swp_offset(entry->swpentry);
	tree = zswap_trees[swp_type(entry->swpentry)];
	spin_unlock(&pool->lru_lock);

	/* Check for invalidate() race */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
		ret = -EAGAIN;
		goto unlock;
	}
	/* Hold a reference to prevent a free during writeback */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	ret = zswap_writeback_entry(entry, tree);

	spin_lock(&tree->lock);
	if (ret) {
		/* Writeback failed, put entry back on LRU */
		spin_lock(&pool->lru_lock);
		list_move(&entry->lru, &pool->lru);
		spin_unlock(&pool->lru_lock);
		goto put_unlock;
	}

	/*
	 * Writeback started successfully, the page now belongs to the
	 * swapcache. Drop the entry from zswap - unless invalidate already
	 * took it out while we had the tree->lock released for IO.
	 */
	zswap_invalidate_entry(tree, entry);

put_unlock:
	/* Drop local reference */
	zswap_entry_put(tree, entry);
unlock:
	spin_unlock(&tree->lock);
	return ret ? -EAGAIN : 0;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	int ret, failures = 0;

	do {
		ret = zswap_reclaim_entry(pool);
		if (ret) {
			zswap_reject_reclaim_fail++;
			if (ret != -EAGAIN)
				break;
			if (++failures == MAX_RECLAIM_RETRIES)
				break;
		}
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	INIT_LIST_HEAD(&pool->lru);
	spin_lock_init(&pool->lru_lock);
	INIT_WORK(&pool->shrink_work, shrink_worker);

	zswap_pool_debug("created", pool);

	return pool;

error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache). If the page
 * is found, it is returned in retpage. Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
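/*
 * In outline (summary comment, mirroring the code below):
 *   1. allocate or look up the page in the swap cache
 *   2. recheck under tree->lock that the entry is still current
 *   3. decompress the entry into the swap cache page
 *   4. kick off __swap_writepage() to the swap device
 */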
static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree)
{
	swp_entry_t swpentry = entry->swpentry;
	struct page *page;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct zpool *pool = entry->pool->zpool;

	u8 *src, *tmp = NULL;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	if (!zpool_can_sleep_mapped(pool)) {
		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/*
		 * Having a local reference to the zswap entry doesn't exclude
		 * swapping from invalidating and recycling the swap slot. Once
		 * the swapcache is secured against concurrent swapping to and
		 * from the slot, recheck that the entry is still current before
		 * writing.
		 */
		spin_lock(&tree->lock);
		if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
			spin_unlock(&tree->lock);
			delete_from_swap_cache(page_folio(page));
			ret = -ENOMEM;
			goto fail;
		}
		spin_unlock(&tree->lock);

		/* decompress */
		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
		dlen = PAGE_SIZE;

		src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
		if (!zpool_can_sleep_mapped(pool)) {
			memcpy(tmp, src, entry->length);
			src = tmp;
			zpool_unmap_handle(pool, entry->handle);
		}

		mutex_lock(acomp_ctx->mutex);
		sg_init_one(&input, src, entry->length);
		sg_init_table(&output, 1);
		sg_set_page(&output, page, PAGE_SIZE, 0);
		acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
		ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
		dlen = acomp_ctx->req->dlen;
		mutex_unlock(acomp_ctx->mutex);

		if (!zpool_can_sleep_mapped(pool))
			kfree(tmp);
		else
			zpool_unmap_handle(pool, entry->handle);

		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc);
	put_page(page);
	zswap_written_back_pages++;

	return ret;

fail:
	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently. It is safe and okay to not free the entry.
	 * It is also okay to return !0.
	 */
	return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

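/*
 * Illustrative example (not part of the source): a page filled entirely
 * with zeroes, or with a single repeating word such as 0xdeadbeefdeadbeef,
 * is never compressed; only that one unsigned long "value" is kept in the
 * entry, and zswap_fill_page() reconstitutes the page on load.
 */
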
/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct obj_cgroup *objcg = NULL;
	struct zswap_pool *pool;
	int ret;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	gfp_t gfp;

	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/*
	 * XXX: zswap reclaim does not work with cgroups yet. Without a
	 * cgroup-aware entry LRU, we will push out entries system-wide based on
	 * local cgroup limits.
	 */
	objcg = get_obj_cgroup_from_page(page);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		ret = -ENOMEM;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		goto shrink;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept()) {
			ret = -ENOMEM;
			goto shrink;
		} else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->swpentry = swp_entry(type, offset);
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	if (!zswap_non_same_filled_pages_enabled) {
		ret = -EINVAL;
		goto freepage;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(acomp_ctx->mutex);

	dst = acomp_ctx->dstmem;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously; this makes the process
	 * synchronous in fact.
	 * Theoretically, acomp supports users sending multiple acomp requests
	 * in one acomp instance, then getting those requests done
	 * simultaneously. But in this case, frontswap actually does store and
	 * load page by page: there is no existing method to send the second
	 * page before the first page is done in one thread doing frontswap.
	 * In different threads running on different cpus we have different
	 * acomp instances, so multiple threads can do (de)compression in
	 * parallel.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(entry->pool->zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	mutex_unlock(acomp_ctx->mutex);

	/* populate entry */
	entry->swpentry = swp_entry(type, offset);
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	entry->objcg = objcg;
	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		/* Account before objcg ref is moved to tree */
		count_objcg_event(objcg, ZSWPOUT);
	}

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	if (entry->length) {
		spin_lock(&entry->pool->lru_lock);
		list_add(&entry->lru, &entry->pool->lru);
		spin_unlock(&entry->pool->lru_lock);
	}
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();
	count_vm_event(ZSWPOUT);

	return 0;

put_dstmem:
	mutex_unlock(acomp_ctx->mutex);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	if (objcg)
		obj_cgroup_put(objcg);
	return ret;

shrink:
	pool = zswap_pool_last_get();
	if (pool)
		queue_work(shrink_wq, &pool->shrink_work);
	ret = -ENOMEM;
	goto reject;
}

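/*
 * Observability note (added commentary, not from the source): successful
 * stores and loads are counted as the zswpout/zswpin vm events, so overall
 * activity can be watched with e.g. "grep zswp /proc/vmstat" alongside the
 * debugfs counters registered below.
 */
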
/*
 * Returns 0 if the page was successfully decompressed.
 * Returns -1 on entry not found or error.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page, bool *exclusive)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src, *dst, *tmp;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		ret = 0;
		goto stats;
	}

	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
		tmp = kmalloc(entry->length, GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto freeentry;
		}
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);

	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
		memcpy(tmp, src, entry->length);
		src = tmp;
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
	}

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(acomp_ctx->mutex);
	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
	mutex_unlock(acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(entry->pool->zpool))
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
	else
		kfree(tmp);

	BUG_ON(ret);
stats:
	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);
freeentry:
	spin_lock(&tree->lock);
	if (!ret && zswap_exclusive_loads_enabled) {
		zswap_invalidate_entry(tree, entry);
		*exclusive = true;
	} else if (entry->length) {
		spin_lock(&entry->pool->lru_lock);
		list_move(&entry->lru, &entry->pool->lru);
		spin_unlock(&entry->pool->lru_lock);
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}
	zswap_invalidate_entry(tree, entry);
	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static const struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

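/*
 * Example of reading the counters above at runtime (assumes debugfs is
 * mounted at the conventional /sys/kernel/debug):
 *
 *	grep -r . /sys/kernel/debug/zswap/
 */
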
/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	shrink_wq = create_workqueue("zswap-shrink");
	if (!shrink_wq)
		goto fallback_fail;

	ret = frontswap_register_ops(&zswap_frontswap_ops);
	if (ret)
		goto destroy_wq;
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

destroy_wq:
	destroy_workqueue(shrink_wq);
fallback_fail:
	if (pool)
		zswap_pool_destroy(pool);
hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Compressed cache for swap pages");