// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;
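
/*
 * Editor's note, illustrative only (not part of the driver): these
 * counters are surfaced read-only to userspace via debugfs, under
 * /sys/kernel/debug/zswap/, by zswap's debugfs init code later in the
 * file (not shown in this excerpt). A minimal sketch of the pattern,
 * assuming a zswap_debugfs_root dentry:
 *
 *	debugfs_create_u64("pool_limit_hit", 0444,
 *			   zswap_debugfs_root, &zswap_pool_limit_hit);
 *
 * Reading the file then returns the current (approximate) count.
 */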

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}
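
/*
 * Editor's note, illustrative only: the module_param_cb() /
 * module_param_named() knobs above appear under
 * /sys/module/zswap/parameters/, and the 0644 mode makes them writable
 * by root at runtime, e.g. (hypothetical values):
 *
 *	echo Y  > /sys/module/zswap/parameters/enabled
 *	echo 25 > /sys/module/zswap/parameters/max_pool_percent
 *
 * The *_param_set callbacks defined later in this file validate a new
 * value (and, for compressor/zpool, switch pools) before it takes
 * effect.
 */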

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression. For a same value filled page length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}
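
/*
 * Editor's note, illustrative only: the per-swapfile trees are sharded
 * so that one xarray covers 1 << SWAP_ADDRESS_SPACE_SHIFT (i.e. 16384)
 * slots, or 64 MiB of swap space with 4 KiB pages. For example, a swap
 * entry at offset 20000 on swap device 0 maps to zswap_trees[0][1]
 * (20000 >> 14 == 1), so lookups on distant offsets touch different
 * xarrays and contend less.
 */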

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}
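
/*
 * Editor's note, a worked example with hypothetical numbers: on a
 * machine with 16 GiB of RAM (4194304 4-KiB pages) and the defaults
 * above, zswap_max_pages() = 4194304 * 20 / 100 = 838860 pages
 * (~3.2 GiB), and zswap_accept_thr_pages() = 838860 * 90 / 100 =
 * 754974 pages. Once the pool reaches the max, zswap_check_limits()
 * keeps reporting "full" until the pool shrinks back below the 90%
 * threshold; the gap between the two limits provides hysteresis so
 * stores don't flap on and off right at the boundary.
 */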

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}
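
/*
 * Editor's note, illustrative only: writing a new value to the
 * compressor or zpool parameter at runtime, e.g.
 *
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *
 * runs __zswap_param_set(), which creates (or resurrects) a pool for
 * the new <zpool, compressor> pair, makes it the current pool, and
 * percpu_ref_kill()s the old current pool's ref. Entries already
 * compressed with the old algorithm stay in the old pool, which is
 * destroyed only after its last entry is freed and the ref drops to
 * zero.
 */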

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}
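
/*
 * Editor's note, a worked example of the decay above (hypothetical
 * numbers): with lru_size = 1000 and nr_zswap_protected incremented to
 * 600, 600 > 1000 / 4, so the cmpxchg loop halves it to 300. Once the
 * protected count falls to <= 250 (a quarter of the LRU), new = old and
 * the counter is left to grow linearly again, so the protection window
 * tracks the LRU size instead of growing without bound.
 */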

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg)
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&zswap_list_lru, entry);
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_folio(&input, folio, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since over-compression can occur, and
	 * hardware accelerators may not check the dst buffer size, so give
	 * the dst buffer enough length to avoid a buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little odd that we send an asynchronous request and
	 * then wait for its completion synchronously; the process is in fact
	 * synchronous. Theoretically, acomp allows users to queue multiple
	 * requests on one acomp instance and have them completed
	 * simultaneously, but zswap stores and loads page by page: there is
	 * no way for one thread doing zswap to send a second page before the
	 * first is done. Different threads on different CPUs use different
	 * acomp instances, however, so multiple threads can still
	 * (de)compress in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = entry->pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle() is atomic, we cannot reliably utilize its
	 * mapped buffer to do crypto_acomp_decompress(), which might sleep.
	 * In such cases, we must resort to copying the buffer to a temporary
	 * one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped
	 * buffer, such as a kmap address of high memory or even a vmap
	 * address. However, sg_init_one() is only equipped to handle linearly
	 * mapped low memory. In such cases, we also must copy the buffer to
	 * a temporary, lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 * So rotate it before dropping the lock. If the entry is
	 * written back or invalidated, the free path will unlink
	 * it. For failures, rotation is the right thing as well.
	 *
	 * Temporary failures, where the same entry should be tried
	 * again immediately, almost never happen for this shrinker.
	 * We don't do any trylocking; -ENOMEM comes closest,
	 * but that's extremely rare and doesn't happen spuriously
	 * either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in the swap cache is a sign
		 * that we are shrinking into the warmer region. We should
		 * terminate shrinking (if we're in the dynamic shrinker
		 * context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	spin_lock(lock);
	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at
	 * the same time (before any of them made reasonable progress), the
	 * total number of reclaimed objects might be more than the number
	 * of unprotected objects (i.e. the reclaimers will reclaim into the
	 * protected area of the zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	/*
	 * Subtract the lru size by an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 *
	 * The memory saving factor calculated here takes same-filled pages
	 * into account, but those are not freeable since they almost occupy
	 * no space. Hence, we may scale nr_freeable down a little bit more
	 * than we should if we have a lot of same-filled pages.
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
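
/*
 * Editor's note, a worked example of the scaling above (hypothetical
 * numbers): with nr_freeable = 900 entries after protection, and
 * nr_stored = 3000 pages compressed into nr_backing = 1000 pages of
 * pool memory (a 3:1 ratio), mult_frac(900, 1000, 3000) = 300. The
 * better the compression ratio, the fewer objects the shrinker reports
 * as reclaimable, since evicting them would buy little memory per unit
 * of writeback I/O.
 */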

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -EINVAL;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_shrink_lock);
		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		memcg = zswap_next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_shrink_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			zswap_next_shrink = NULL;
			spin_unlock(&zswap_shrink_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_shrink_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* same-filled functions
**********************************/
static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
{
	unsigned long *data;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
	bool ret = false;

	data = kmap_local_folio(folio, 0);
	val = data[0];

	if (val != data[last_pos])
		goto out;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != data[pos])
			goto out;
	}

	*value = val;
	ret = true;
out:
	kunmap_local(data);
	return ret;
}

static void zswap_fill_folio(struct folio *folio, unsigned long value)
{
	unsigned long *data = kmap_local_folio(folio, 0);

	memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
	kunmap_local(data);
}
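
/*
 * Editor's note, illustrative only: a page of all zeroes, or any page
 * whose contents repeat a single word-sized pattern (e.g. every
 * unsigned long is 0xdeadbeefdeadbeef on 64-bit), is detected above by
 * checking the first word against the last and then against every
 * intermediate word. Such a page is stored as just the pattern in
 * entry->value with entry->length == 0, and zswap_fill_folio()
 * reconstitutes it with memset_l() on load, with no zpool allocation
 * or compression involved.
 */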

/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_is_folio_same_filled(folio, &value)) {
		entry->length = 0;
		entry->value = value;
		atomic_inc(&zswap_same_filled_pages);
		goto store_entry;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
1504 | atomic_inc(&zswap_stored_pages); | |
f6498b77 | 1505 | count_vm_event(ZSWPOUT); |
2b281117 | 1506 | |
42c06a0e | 1507 | return true; |
2b281117 | 1508 | |
796c2c23 CL |
1509 | store_failed: |
1510 | if (!entry->length) | |
1511 | atomic_dec(&zswap_same_filled_pages); | |
1512 | else { | |
8edc9c4e | 1513 | zpool_free(entry->pool->zpool, entry->handle); |
a65b0e76 | 1514 | put_pool: |
796c2c23 CL |
1515 | zswap_pool_put(entry->pool); |
1516 | } | |
f1c54846 | 1517 | freepage: |
2b281117 SJ |
1518 | zswap_entry_cache_free(entry); |
1519 | reject: | |
91b71e78 | 1520 | obj_cgroup_put(objcg); |
4ea3fa9d YA |
1521 | if (zswap_pool_reached_full) |
1522 | queue_work(shrink_wq, &zswap_shrink_work); | |
f576a1e8 CZ |
1523 | check_old: |
1524 | /* | |
1525 | * If the zswap store fails or zswap is disabled, we must invalidate the | |
1526 | * possibly stale entry which was previously stored at this offset. | |
1527 | * Otherwise, writeback could overwrite the new data in the swapfile. | |
1528 | */ | |
796c2c23 | 1529 | entry = xa_erase(tree, offset); |
f576a1e8 | 1530 | if (entry) |
796c2c23 | 1531 | zswap_entry_free(entry); |
42c06a0e | 1532 | return false; |
2b281117 SJ |
1533 | } |
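
/*
 * Illustrative map (not part of the driver) of the unwind ladder in
 * zswap_store(), in code order:
 *
 *	store_failed: undo zpool/same-filled bookkeeping (xa_store() failed)
 *	put_pool:     drop the pool reference            (compression failed)
 *	freepage:     free the entry metadata            (no current pool)
 *	reject:       put objcg, maybe kick the shrinker (limits / no memory)
 *	check_old:    erase any stale entry at this offset
 *
 * Each label falls through to the ones below it, so a failure at any
 * step releases exactly what had been acquired up to that point, and
 * every failure path (as well as the !zswap_enabled case) ends at
 * check_old: so stale compressed data can never be written back over
 * the newer contents in the swapfile.
 */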

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used:
	 * zswap does not properly load them, and a large folio may only be
	 * partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * devices, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	if (entry->length)
		zswap_decompress(entry, folio);
	else
		zswap_fill_folio(folio, entry->value);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}
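
/*
 * Illustrative summary (not part of the driver) of the two load flavors
 * above:
 *
 *	swapcache read (common):	SWP_SYNCHRONOUS_IO singleton fault:
 *	  xa_erase(tree, offset);	  xa_load(tree, offset);
 *	  decompress or fill folio;	  decompress or fill folio;
 *	  zswap_entry_free(entry);	  (entry stays in the tree)
 *	  folio_mark_dirty(folio);
 *
 * After an exclusive (swapcache) load the compressed copy is gone, so
 * the folio is marked dirty to force a fresh store if it is reclaimed
 * again; in the non-exclusive case zswap remains the owner of the data.
 */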

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
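
/*
 * Worked example (illustrative): assuming SWAP_ADDRESS_SPACE_PAGES is
 * 1 << SWAP_ADDRESS_SPACE_SHIFT = 16384, per <linux/swap.h> at the time
 * of writing, an 8 GiB swap device with 4 KiB pages gives
 *
 *	nr_pages = 8 GiB / 4 KiB = 2097152
 *	nr = DIV_ROUND_UP(2097152, 16384) = 128
 *
 * i.e. one xarray per 64 MiB chunk of swap space, spreading lookups and
 * store contention across 128 trees instead of a single one.
 */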

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
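
/*
 * Usage note (illustrative): with debugfs mounted at the conventional
 * /sys/kernel/debug, the attributes registered above appear as read-only
 * files under /sys/kernel/debug/zswap/, e.g. "pool_total_size" (in
 * bytes, i.e. zswap_total_pages() * PAGE_SIZE) and the cumulative
 * reject_* counters.
 */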

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}
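
/*
 * Illustrative note (not part of the driver): the unwind ladder in
 * zswap_setup() releases resources in reverse order of acquisition,
 * each label falling through to the next:
 *
 *	lru_fail:       shrinker_free()            (list_lru init failed)
 *	shrinker_fail:  destroy_workqueue()        (shrinker alloc failed)
 *	shrink_wq_fail: cpuhp_remove_multi_state() (workqueue alloc failed)
 *	hp_fail:        kmem_cache_destroy()       (cpuhp setup failed)
 *	cache_fail:     mark ZSWAP_INIT_FAILED     (entry cache failed)
 *
 * A failed pool creation, by contrast, is deliberately not fatal: setup
 * still returns 0 with zswap_enabled cleared, so a usable pool can be
 * configured later through the module parameters.
 */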

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Compressed cache for swap pages");