fs/f2fs/compress.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <[email protected]>
6  */
7
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/moduleparam.h>
11 #include <linux/writeback.h>
12 #include <linux/backing-dev.h>
13 #include <linux/lzo.h>
14 #include <linux/lz4.h>
15 #include <linux/zstd.h>
16 #include <linux/pagevec.h>
17
18 #include "f2fs.h"
19 #include "node.h"
20 #include "segment.h"
21 #include <trace/events/f2fs.h>
22
23 static struct kmem_cache *cic_entry_slab;
24 static struct kmem_cache *dic_entry_slab;
25
26 static void *page_array_alloc(struct inode *inode, int nr)
27 {
28         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
29         unsigned int size = sizeof(struct page *) * nr;
30
31         if (likely(size <= sbi->page_array_slab_size))
32                 return f2fs_kmem_cache_alloc(sbi->page_array_slab,
33                                         GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
34         return f2fs_kzalloc(sbi, size, GFP_NOFS);
35 }
36
37 static void page_array_free(struct inode *inode, void *pages, int nr)
38 {
39         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40         unsigned int size = sizeof(struct page *) * nr;
41
42         if (!pages)
43                 return;
44
45         if (likely(size <= sbi->page_array_slab_size))
46                 kmem_cache_free(sbi->page_array_slab, pages);
47         else
48                 kfree(pages);
49 }
50
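/*
 * Per-algorithm hooks: compress_pages() and decompress_pages() are mandatory,
 * while the ctx init/destroy hooks and is_level_valid() may be left NULL when
 * an algorithm does not need them (only zstd provides decompress-context
 * hooks below).
 */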
51 struct f2fs_compress_ops {
52         int (*init_compress_ctx)(struct compress_ctx *cc);
53         void (*destroy_compress_ctx)(struct compress_ctx *cc);
54         int (*compress_pages)(struct compress_ctx *cc);
55         int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
56         void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
57         int (*decompress_pages)(struct decompress_io_ctx *dic);
58         bool (*is_level_valid)(int level);
59 };
60
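/*
 * Cluster addressing helpers. For example, with the default 4-page cluster
 * (log_cluster_size == 2, cluster_size == 4), page index 7 has
 * offset_in_cluster() == 3 and cluster_idx() == 1, and start_idx_of_cluster()
 * for that cluster is 4.
 */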
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
62 {
63         return index & (cc->cluster_size - 1);
64 }
65
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
67 {
68         return index >> cc->log_cluster_size;
69 }
70
71 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
72 {
73         return cc->cluster_idx << cc->log_cluster_size;
74 }
75
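/*
 * A compressed (control) page has a compress_io_ctx/decompress_io_ctx pointer
 * attached via page_private(); both contexts start with a magic word, so the
 * check below distinguishes them from regular pages whose private data is a
 * non-pointer flag word.
 */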
76 bool f2fs_is_compressed_page(struct page *page)
77 {
78         if (!PagePrivate(page))
79                 return false;
80         if (!page_private(page))
81                 return false;
82         if (page_private_nonpointer(page))
83                 return false;
84
85         f2fs_bug_on(F2FS_M_SB(page->mapping),
86                 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
87         return true;
88 }
89
90 static void f2fs_set_compressed_page(struct page *page,
91                 struct inode *inode, pgoff_t index, void *data)
92 {
93         struct folio *folio = page_folio(page);
94
95         folio_attach_private(folio, (void *)data);
96
97         /* i_crypto_info and iv index */
98         folio->index = index;
99         folio->mapping = inode->i_mapping;
100 }
101
102 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
103 {
104         int i;
105
106         for (i = 0; i < len; i++) {
107                 if (!cc->rpages[i])
108                         continue;
109                 if (unlock)
110                         unlock_page(cc->rpages[i]);
111                 else
112                         put_page(cc->rpages[i]);
113         }
114 }
115
116 static void f2fs_put_rpages(struct compress_ctx *cc)
117 {
118         f2fs_drop_rpages(cc, cc->cluster_size, false);
119 }
120
121 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
122 {
123         f2fs_drop_rpages(cc, len, true);
124 }
125
126 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
127                 struct writeback_control *wbc, bool redirty, int unlock)
128 {
129         unsigned int i;
130
131         for (i = 0; i < cc->cluster_size; i++) {
132                 if (!cc->rpages[i])
133                         continue;
134                 if (redirty)
135                         redirty_page_for_writepage(wbc, cc->rpages[i]);
136                 f2fs_put_page(cc->rpages[i], unlock);
137         }
138 }
139
140 struct page *f2fs_compress_control_page(struct page *page)
141 {
142         return ((struct compress_io_ctx *)page_private(page))->rpages[0];
143 }
144
145 int f2fs_init_compress_ctx(struct compress_ctx *cc)
146 {
147         if (cc->rpages)
148                 return 0;
149
150         cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
151         return cc->rpages ? 0 : -ENOMEM;
152 }
153
154 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
155 {
156         page_array_free(cc->inode, cc->rpages, cc->cluster_size);
157         cc->rpages = NULL;
158         cc->nr_rpages = 0;
159         cc->nr_cpages = 0;
160         cc->valid_nr_cpages = 0;
161         if (!reuse)
162                 cc->cluster_idx = NULL_CLUSTER;
163 }
164
165 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
166 {
167         unsigned int cluster_ofs;
168
169         if (!f2fs_cluster_can_merge_page(cc, folio->index))
170                 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
171
172         cluster_ofs = offset_in_cluster(cc, folio->index);
173         cc->rpages[cluster_ofs] = folio_page(folio, 0);
174         cc->nr_rpages++;
175         cc->cluster_idx = cluster_idx(cc, folio->index);
176 }
177
178 #ifdef CONFIG_F2FS_FS_LZO
179 static int lzo_init_compress_ctx(struct compress_ctx *cc)
180 {
181         cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
182                                 LZO1X_MEM_COMPRESS, GFP_NOFS);
183         if (!cc->private)
184                 return -ENOMEM;
185
186         cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
187         return 0;
188 }
189
190 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
191 {
192         kvfree(cc->private);
193         cc->private = NULL;
194 }
195
196 static int lzo_compress_pages(struct compress_ctx *cc)
197 {
198         int ret;
199
200         ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
201                                         &cc->clen, cc->private);
202         if (ret != LZO_E_OK) {
203                 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
204                                 "lzo compress failed, ret:%d", ret);
205                 return -EIO;
206         }
207         return 0;
208 }
209
210 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
211 {
212         int ret;
213
214         ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
215                                                 dic->rbuf, &dic->rlen);
216         if (ret != LZO_E_OK) {
217                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
218                                 "lzo decompress failed, ret:%d", ret);
219                 return -EIO;
220         }
221
222         if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
223                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
224                                 "lzo invalid rlen:%zu, expected:%lu",
225                                 dic->rlen, PAGE_SIZE << dic->log_cluster_size);
226                 return -EIO;
227         }
228         return 0;
229 }
230
231 static const struct f2fs_compress_ops f2fs_lzo_ops = {
232         .init_compress_ctx      = lzo_init_compress_ctx,
233         .destroy_compress_ctx   = lzo_destroy_compress_ctx,
234         .compress_pages         = lzo_compress_pages,
235         .decompress_pages       = lzo_decompress_pages,
236 };
237 #endif
238
239 #ifdef CONFIG_F2FS_FS_LZ4
240 static int lz4_init_compress_ctx(struct compress_ctx *cc)
241 {
242         unsigned int size = LZ4_MEM_COMPRESS;
243
244 #ifdef CONFIG_F2FS_FS_LZ4HC
245         if (F2FS_I(cc->inode)->i_compress_level)
246                 size = LZ4HC_MEM_COMPRESS;
247 #endif
248
249         cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
250         if (!cc->private)
251                 return -ENOMEM;
252
253         /*
254          * we do not set cc->clen to LZ4_compressBound(inputsize) to cover
255          * the worst-case compressed size, because the lz4 compressor can
256          * handle the output budget properly on its own.
257          */
258         cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
259         return 0;
260 }
261
262 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
263 {
264         kvfree(cc->private);
265         cc->private = NULL;
266 }
267
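/*
 * Note that a zero return from the lz4 compressor means the output did not
 * fit within cc->clen (i.e. compressing would not save a full page); it is
 * mapped to -EAGAIN so that the caller falls back to writing the cluster
 * uncompressed.
 */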
268 static int lz4_compress_pages(struct compress_ctx *cc)
269 {
270         int len = -EINVAL;
271         unsigned char level = F2FS_I(cc->inode)->i_compress_level;
272
273         if (!level)
274                 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
275                                                 cc->clen, cc->private);
276 #ifdef CONFIG_F2FS_FS_LZ4HC
277         else
278                 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
279                                         cc->clen, level, cc->private);
280 #endif
281         if (len < 0)
282                 return len;
283         if (!len)
284                 return -EAGAIN;
285
286         cc->clen = len;
287         return 0;
288 }
289
290 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
291 {
292         int ret;
293
294         ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
295                                                 dic->clen, dic->rlen);
296         if (ret < 0) {
297                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
298                                 "lz4 decompress failed, ret:%d", ret);
299                 return -EIO;
300         }
301
302         if (ret != PAGE_SIZE << dic->log_cluster_size) {
303                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
304                                 "lz4 invalid ret:%d, expected:%lu",
305                                 ret, PAGE_SIZE << dic->log_cluster_size);
306                 return -EIO;
307         }
308         return 0;
309 }
310
311 static bool lz4_is_level_valid(int lvl)
312 {
313 #ifdef CONFIG_F2FS_FS_LZ4HC
314         return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
315 #else
316         return lvl == 0;
317 #endif
318 }
319
320 static const struct f2fs_compress_ops f2fs_lz4_ops = {
321         .init_compress_ctx      = lz4_init_compress_ctx,
322         .destroy_compress_ctx   = lz4_destroy_compress_ctx,
323         .compress_pages         = lz4_compress_pages,
324         .decompress_pages       = lz4_decompress_pages,
325         .is_level_valid         = lz4_is_level_valid,
326 };
327 #endif
328
329 #ifdef CONFIG_F2FS_FS_ZSTD
330 static int zstd_init_compress_ctx(struct compress_ctx *cc)
331 {
332         zstd_parameters params;
333         zstd_cstream *stream;
334         void *workspace;
335         unsigned int workspace_size;
336         unsigned char level = F2FS_I(cc->inode)->i_compress_level;
337
338         /* Keep this for backward compatibility */
339         if (!level)
340                 level = F2FS_ZSTD_DEFAULT_CLEVEL;
341
342         params = zstd_get_params(level, cc->rlen);
343         workspace_size = zstd_cstream_workspace_bound(&params.cParams);
344
345         workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
346                                         workspace_size, GFP_NOFS);
347         if (!workspace)
348                 return -ENOMEM;
349
350         stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
351         if (!stream) {
352                 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
353                                 "%s zstd_init_cstream failed", __func__);
354                 kvfree(workspace);
355                 return -EIO;
356         }
357
358         cc->private = workspace;
359         cc->private2 = stream;
360
361         cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
362         return 0;
363 }
364
365 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
366 {
367         kvfree(cc->private);
368         cc->private = NULL;
369         cc->private2 = NULL;
370 }
371
372 static int zstd_compress_pages(struct compress_ctx *cc)
373 {
374         zstd_cstream *stream = cc->private2;
375         zstd_in_buffer inbuf;
376         zstd_out_buffer outbuf;
377         int src_size = cc->rlen;
378         int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
379         int ret;
380
381         inbuf.pos = 0;
382         inbuf.src = cc->rbuf;
383         inbuf.size = src_size;
384
385         outbuf.pos = 0;
386         outbuf.dst = cc->cbuf->cdata;
387         outbuf.size = dst_size;
388
389         ret = zstd_compress_stream(stream, &outbuf, &inbuf);
390         if (zstd_is_error(ret)) {
391                 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
392                                 "%s zstd_compress_stream failed, ret: %d",
393                                 __func__, zstd_get_error_code(ret));
394                 return -EIO;
395         }
396
397         ret = zstd_end_stream(stream, &outbuf);
398         if (zstd_is_error(ret)) {
399                 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
400                                 "%s zstd_end_stream returned %d",
401                                 __func__, zstd_get_error_code(ret));
402                 return -EIO;
403         }
404
405         /*
406          * compressed data remains in the intermediate buffer because there
407          * is no more space left in cbuf.cdata
408          */
409         if (ret)
410                 return -EAGAIN;
411
412         cc->clen = outbuf.pos;
413         return 0;
414 }
415
416 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
417 {
418         zstd_dstream *stream;
419         void *workspace;
420         unsigned int workspace_size;
421         unsigned int max_window_size =
422                         MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
423
424         workspace_size = zstd_dstream_workspace_bound(max_window_size);
425
426         workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
427                                         workspace_size, GFP_NOFS);
428         if (!workspace)
429                 return -ENOMEM;
430
431         stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
432         if (!stream) {
433                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
434                                 "%s zstd_init_dstream failed", __func__);
435                 kvfree(workspace);
436                 return -EIO;
437         }
438
439         dic->private = workspace;
440         dic->private2 = stream;
441
442         return 0;
443 }
444
445 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
446 {
447         kvfree(dic->private);
448         dic->private = NULL;
449         dic->private2 = NULL;
450 }
451
452 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
453 {
454         zstd_dstream *stream = dic->private2;
455         zstd_in_buffer inbuf;
456         zstd_out_buffer outbuf;
457         int ret;
458
459         inbuf.pos = 0;
460         inbuf.src = dic->cbuf->cdata;
461         inbuf.size = dic->clen;
462
463         outbuf.pos = 0;
464         outbuf.dst = dic->rbuf;
465         outbuf.size = dic->rlen;
466
467         ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
468         if (zstd_is_error(ret)) {
469                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
470                                 "%s zstd_decompress_stream failed, ret: %d",
471                                 __func__, zstd_get_error_code(ret));
472                 return -EIO;
473         }
474
475         if (dic->rlen != outbuf.pos) {
476                 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
477                                 "%s ZSTD invalid rlen:%zu, expected:%lu",
478                                 __func__, dic->rlen,
479                                 PAGE_SIZE << dic->log_cluster_size);
480                 return -EIO;
481         }
482
483         return 0;
484 }
485
486 static bool zstd_is_level_valid(int lvl)
487 {
488         return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
489 }
490
491 static const struct f2fs_compress_ops f2fs_zstd_ops = {
492         .init_compress_ctx      = zstd_init_compress_ctx,
493         .destroy_compress_ctx   = zstd_destroy_compress_ctx,
494         .compress_pages         = zstd_compress_pages,
495         .init_decompress_ctx    = zstd_init_decompress_ctx,
496         .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
497         .decompress_pages       = zstd_decompress_pages,
498         .is_level_valid         = zstd_is_level_valid,
499 };
500 #endif
501
502 #ifdef CONFIG_F2FS_FS_LZO
503 #ifdef CONFIG_F2FS_FS_LZORLE
504 static int lzorle_compress_pages(struct compress_ctx *cc)
505 {
506         int ret;
507
508         ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
509                                         &cc->clen, cc->private);
510         if (ret != LZO_E_OK) {
511                 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
512                                 "lzo-rle compress failed, ret:%d", ret);
513                 return -EIO;
514         }
515         return 0;
516 }
517
518 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
519         .init_compress_ctx      = lzo_init_compress_ctx,
520         .destroy_compress_ctx   = lzo_destroy_compress_ctx,
521         .compress_pages         = lzorle_compress_pages,
522         .decompress_pages       = lzo_decompress_pages,
523 };
524 #endif
525 #endif
526
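/*
 * Indexed by fi->i_compress_algorithm (COMPRESS_LZO, COMPRESS_LZ4,
 * COMPRESS_ZSTD, COMPRESS_LZORLE); a NULL slot means the algorithm was not
 * built in, which f2fs_is_compress_backend_ready() checks for.
 */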
527 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
528 #ifdef CONFIG_F2FS_FS_LZO
529         &f2fs_lzo_ops,
530 #else
531         NULL,
532 #endif
533 #ifdef CONFIG_F2FS_FS_LZ4
534         &f2fs_lz4_ops,
535 #else
536         NULL,
537 #endif
538 #ifdef CONFIG_F2FS_FS_ZSTD
539         &f2fs_zstd_ops,
540 #else
541         NULL,
542 #endif
543 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
544         &f2fs_lzorle_ops,
545 #else
546         NULL,
547 #endif
548 };
549
550 bool f2fs_is_compress_backend_ready(struct inode *inode)
551 {
552         if (!f2fs_compressed_file(inode))
553                 return true;
554         return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
555 }
556
557 bool f2fs_is_compress_level_valid(int alg, int lvl)
558 {
559         const struct f2fs_compress_ops *cops = f2fs_cops[alg];
560
561         if (cops->is_level_valid)
562                 return cops->is_level_valid(lvl);
563
564         return lvl == 0;
565 }
566
567 static mempool_t *compress_page_pool;
568 static int num_compress_pages = 512;
569 module_param(num_compress_pages, uint, 0444);
570 MODULE_PARM_DESC(num_compress_pages,
571                 "Number of intermediate compress pages to preallocate");
572
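/*
 * Intermediate pages holding compressed output are drawn from a mempool so
 * that, under memory pressure, allocations can fall back to the preallocated
 * reserve (num_compress_pages) instead of failing during writeback.
 */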
573 int __init f2fs_init_compress_mempool(void)
574 {
575         compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
576         return compress_page_pool ? 0 : -ENOMEM;
577 }
578
579 void f2fs_destroy_compress_mempool(void)
580 {
581         mempool_destroy(compress_page_pool);
582 }
583
584 static struct page *f2fs_compress_alloc_page(void)
585 {
586         struct page *page;
587
588         page = mempool_alloc(compress_page_pool, GFP_NOFS);
589         lock_page(page);
590
591         return page;
592 }
593
594 static void f2fs_compress_free_page(struct page *page)
595 {
596         if (!page)
597                 return;
598         detach_page_private(page);
599         page->mapping = NULL;
600         unlock_page(page);
601         mempool_free(page, compress_page_pool);
602 }
603
604 #define MAX_VMAP_RETRIES        3
605
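/*
 * vm_map_ram() may fail when virtual address space is fragmented by lazily
 * freed mappings; vm_unmap_aliases() flushes those before each retry.
 */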
606 static void *f2fs_vmap(struct page **pages, unsigned int count)
607 {
608         int i;
609         void *buf = NULL;
610
611         for (i = 0; i < MAX_VMAP_RETRIES; i++) {
612                 buf = vm_map_ram(pages, count, -1);
613                 if (buf)
614                         break;
615                 vm_unmap_aliases();
616         }
617         return buf;
618 }
619
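/*
 * Compress one cluster. A rough sketch of the buffers involved, assuming the
 * default 4-page cluster and 4KiB pages: cc->rbuf is the vmapped view of the
 * raw pages (16KiB) and cc->cbuf is the vmapped output area, which begins
 * with a small header (clen, chksum, reserved words) followed by cdata[].
 * The result is only kept if header plus compressed data fit within
 * cluster_size - 1 pages (max_len below), i.e. compression must save at
 * least one full page; otherwise -EAGAIN tells the caller to write the
 * cluster uncompressed.
 */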
620 static int f2fs_compress_pages(struct compress_ctx *cc)
621 {
622         struct f2fs_inode_info *fi = F2FS_I(cc->inode);
623         const struct f2fs_compress_ops *cops =
624                                 f2fs_cops[fi->i_compress_algorithm];
625         unsigned int max_len, new_nr_cpages;
626         u32 chksum = 0;
627         int i, ret;
628
629         trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
630                                 cc->cluster_size, fi->i_compress_algorithm);
631
632         if (cops->init_compress_ctx) {
633                 ret = cops->init_compress_ctx(cc);
634                 if (ret)
635                         goto out;
636         }
637
638         max_len = COMPRESS_HEADER_SIZE + cc->clen;
639         cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
640         cc->valid_nr_cpages = cc->nr_cpages;
641
642         cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
643         if (!cc->cpages) {
644                 ret = -ENOMEM;
645                 goto destroy_compress_ctx;
646         }
647
648         for (i = 0; i < cc->nr_cpages; i++)
649                 cc->cpages[i] = f2fs_compress_alloc_page();
650
651         cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
652         if (!cc->rbuf) {
653                 ret = -ENOMEM;
654                 goto out_free_cpages;
655         }
656
657         cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
658         if (!cc->cbuf) {
659                 ret = -ENOMEM;
660                 goto out_vunmap_rbuf;
661         }
662
663         ret = cops->compress_pages(cc);
664         if (ret)
665                 goto out_vunmap_cbuf;
666
667         max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
668
669         if (cc->clen > max_len) {
670                 ret = -EAGAIN;
671                 goto out_vunmap_cbuf;
672         }
673
674         cc->cbuf->clen = cpu_to_le32(cc->clen);
675
676         if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
677                 chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
678                                         cc->cbuf->cdata, cc->clen);
679         cc->cbuf->chksum = cpu_to_le32(chksum);
680
681         for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
682                 cc->cbuf->reserved[i] = cpu_to_le32(0);
683
684         new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
685
686         /* zero out any unused part of the last page */
687         memset(&cc->cbuf->cdata[cc->clen], 0,
688                         (new_nr_cpages * PAGE_SIZE) -
689                         (cc->clen + COMPRESS_HEADER_SIZE));
690
691         vm_unmap_ram(cc->cbuf, cc->nr_cpages);
692         vm_unmap_ram(cc->rbuf, cc->cluster_size);
693
694         for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
695                 f2fs_compress_free_page(cc->cpages[i]);
696                 cc->cpages[i] = NULL;
697         }
698
699         if (cops->destroy_compress_ctx)
700                 cops->destroy_compress_ctx(cc);
701
702         cc->valid_nr_cpages = new_nr_cpages;
703
704         trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
705                                                         cc->clen, ret);
706         return 0;
707
708 out_vunmap_cbuf:
709         vm_unmap_ram(cc->cbuf, cc->nr_cpages);
710 out_vunmap_rbuf:
711         vm_unmap_ram(cc->rbuf, cc->cluster_size);
712 out_free_cpages:
713         for (i = 0; i < cc->nr_cpages; i++) {
714                 if (cc->cpages[i])
715                         f2fs_compress_free_page(cc->cpages[i]);
716         }
717         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
718         cc->cpages = NULL;
719 destroy_compress_ctx:
720         if (cops->destroy_compress_ctx)
721                 cops->destroy_compress_ctx(cc);
722 out:
723         trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
724                                                         cc->clen, ret);
725         return ret;
726 }
727
728 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
729                 bool pre_alloc);
730 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
731                 bool bypass_destroy_callback, bool pre_alloc);
732
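/*
 * Decompress one cluster: read clen from the cluster header, sanity-check it
 * against the space provided by the compressed pages, run the algorithm's
 * decompress_pages() hook and, when COMPRESS_CHKSUM is enabled, compare the
 * checksum stored in the header with one computed over the compressed data.
 */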
733 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
734 {
735         struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
736         struct f2fs_inode_info *fi = F2FS_I(dic->inode);
737         const struct f2fs_compress_ops *cops =
738                         f2fs_cops[fi->i_compress_algorithm];
739         bool bypass_callback = false;
740         int ret;
741
742         trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
743                                 dic->cluster_size, fi->i_compress_algorithm);
744
745         if (dic->failed) {
746                 ret = -EIO;
747                 goto out_end_io;
748         }
749
750         ret = f2fs_prepare_decomp_mem(dic, false);
751         if (ret) {
752                 bypass_callback = true;
753                 goto out_release;
754         }
755
756         dic->clen = le32_to_cpu(dic->cbuf->clen);
757         dic->rlen = PAGE_SIZE << dic->log_cluster_size;
758
759         if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
760                 ret = -EFSCORRUPTED;
761
762                 /* Avoid f2fs_commit_super in irq context */
763                 if (!in_task)
764                         f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
765                 else
766                         f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
767                 goto out_release;
768         }
769
770         ret = cops->decompress_pages(dic);
771
772         if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
773                 u32 provided = le32_to_cpu(dic->cbuf->chksum);
774                 u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
775
776                 if (provided != calculated) {
777                         if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
778                                 set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
779                                 f2fs_info_ratelimited(sbi,
780                                         "checksum invalid, nid = %lu, %x vs %x",
781                                         dic->inode->i_ino,
782                                         provided, calculated);
783                         }
784                         set_sbi_flag(sbi, SBI_NEED_FSCK);
785                 }
786         }
787
788 out_release:
789         f2fs_release_decomp_mem(dic, bypass_callback, false);
790
791 out_end_io:
792         trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
793                                                         dic->clen, ret);
794         f2fs_decompress_end_io(dic, ret, in_task);
795 }
796
797 /*
798  * This is called when a page of a compressed cluster has been read from disk
799  * (or failed to be read from disk).  It checks whether this page was the last
800  * page being waited on in the cluster, and if so, it decompresses the cluster
801  * (or in the case of a failure, cleans up without actually decompressing).
802  */
803 void f2fs_end_read_compressed_page(struct page *page, bool failed,
804                 block_t blkaddr, bool in_task)
805 {
806         struct decompress_io_ctx *dic =
807                         (struct decompress_io_ctx *)page_private(page);
808         struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
809
810         dec_page_count(sbi, F2FS_RD_DATA);
811
812         if (failed)
813                 WRITE_ONCE(dic->failed, true);
814         else if (blkaddr && in_task)
815                 f2fs_cache_compressed_page(sbi, page,
816                                         dic->inode->i_ino, blkaddr);
817
818         if (atomic_dec_and_test(&dic->remaining_pages))
819                 f2fs_decompress_cluster(dic, in_task);
820 }
821
822 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
823 {
824         if (cc->cluster_idx == NULL_CLUSTER)
825                 return true;
826         return cc->cluster_idx == cluster_idx(cc, index);
827 }
828
829 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
830 {
831         return cc->nr_rpages == 0;
832 }
833
834 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
835 {
836         return cc->cluster_size == cc->nr_rpages;
837 }
838
839 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
840 {
841         if (f2fs_cluster_is_empty(cc))
842                 return true;
843         return is_page_in_cluster(cc, index);
844 }
845
846 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
847                                 int index, int nr_pages, bool uptodate)
848 {
849         unsigned long pgidx = pages[index]->index;
850         int i = uptodate ? 0 : 1;
851
852         /*
853          * when uptodate is true, check whether all pages in the cluster
854          * are uptodate.
855          */
856         if (uptodate && (pgidx % cc->cluster_size))
857                 return false;
858
859         if (nr_pages - index < cc->cluster_size)
860                 return false;
861
862         for (; i < cc->cluster_size; i++) {
863                 if (pages[index + i]->index != pgidx + i)
864                         return false;
865                 if (uptodate && !PageUptodate(pages[index + i]))
866                         return false;
867         }
868
869         return true;
870 }
871
872 static bool cluster_has_invalid_data(struct compress_ctx *cc)
873 {
874         loff_t i_size = i_size_read(cc->inode);
875         unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
876         int i;
877
878         for (i = 0; i < cc->cluster_size; i++) {
879                 struct page *page = cc->rpages[i];
880
881                 f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
882
883                 /* beyond EOF */
884                 if (page_folio(page)->index >= nr_pages)
885                         return true;
886         }
887         return false;
888 }
889
890 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
891 {
892 #ifdef CONFIG_F2FS_CHECK_FS
893         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
894         unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
895         int cluster_end = 0;
896         unsigned int count;
897         int i;
898         char *reason = "";
899
900         if (dn->data_blkaddr != COMPRESS_ADDR)
901                 return false;
902
903         /* [..., COMPR_ADDR, ...] */
904         if (dn->ofs_in_node % cluster_size) {
905                 reason = "[*|C|*|*]";
906                 goto out;
907         }
908
909         for (i = 1, count = 1; i < cluster_size; i++, count++) {
910                 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
911                                                         dn->ofs_in_node + i);
912
913                 /* [COMPR_ADDR, ..., COMPR_ADDR] */
914                 if (blkaddr == COMPRESS_ADDR) {
915                         reason = "[C|*|C|*]";
916                         goto out;
917                 }
918                 if (!__is_valid_data_blkaddr(blkaddr)) {
919                         if (!cluster_end)
920                                 cluster_end = i;
921                         continue;
922                 }
923                 /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
924                 if (cluster_end) {
925                         reason = "[C|N|N|V]";
926                         goto out;
927                 }
928         }
929
930         f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
931                 !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
932
933         return false;
934 out:
935         f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
936                         dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
937         set_sbi_flag(sbi, SBI_NEED_FSCK);
938         return true;
939 #else
940         return false;
941 #endif
942 }
943
944 static int __f2fs_get_cluster_blocks(struct inode *inode,
945                                         struct dnode_of_data *dn)
946 {
947         unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
948         int count, i;
949
950         for (i = 0, count = 0; i < cluster_size; i++) {
951                 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
952                                                         dn->ofs_in_node + i);
953
954                 if (__is_valid_data_blkaddr(blkaddr))
955                         count++;
956         }
957
958         return count;
959 }
960
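/*
 * Return value depends on @type: CLUSTER_COMPR_BLKS gives 1 (for the
 * COMPRESS_ADDR slot) plus the number of valid compressed blocks,
 * CLUSTER_IS_COMPR gives 1 when the cluster is compressed, and
 * CLUSTER_RAW_BLKS gives the number of valid raw blocks in a non-compressed
 * cluster; a missing dnode (-ENOENT) is treated as 0, and any other failure
 * is returned as a negative error.
 */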
961 static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
962                                 enum cluster_check_type type)
963 {
964         struct dnode_of_data dn;
965         unsigned int start_idx = cluster_idx <<
966                                 F2FS_I(inode)->i_log_cluster_size;
967         int ret;
968
969         set_new_dnode(&dn, inode, NULL, NULL, 0);
970         ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
971         if (ret) {
972                 if (ret == -ENOENT)
973                         ret = 0;
974                 goto fail;
975         }
976
977         if (f2fs_sanity_check_cluster(&dn)) {
978                 ret = -EFSCORRUPTED;
979                 goto fail;
980         }
981
982         if (dn.data_blkaddr == COMPRESS_ADDR) {
983                 if (type == CLUSTER_COMPR_BLKS)
984                         ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
985                 else if (type == CLUSTER_IS_COMPR)
986                         ret = 1;
987         } else if (type == CLUSTER_RAW_BLKS) {
988                 ret = __f2fs_get_cluster_blocks(inode, &dn);
989         }
990 fail:
991         f2fs_put_dnode(&dn);
992         return ret;
993 }
994
995 /* return # of compressed blocks in compressed cluster */
996 static int f2fs_compressed_blocks(struct compress_ctx *cc)
997 {
998         return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
999                 CLUSTER_COMPR_BLKS);
1000 }
1001
1002 /* return # of raw blocks in non-compressed cluster */
1003 static int f2fs_decompressed_blocks(struct inode *inode,
1004                                 unsigned int cluster_idx)
1005 {
1006         return __f2fs_cluster_blocks(inode, cluster_idx,
1007                 CLUSTER_RAW_BLKS);
1008 }
1009
1010 /* return whether the cluster is a compressed one or not */
1011 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
1012 {
1013         return __f2fs_cluster_blocks(inode,
1014                 index >> F2FS_I(inode)->i_log_cluster_size,
1015                 CLUSTER_IS_COMPR);
1016 }
1017
1018 /* return whether the cluster contains non-raw blocks or not */
1019 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
1020 {
1021         unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
1022
1023         return f2fs_decompressed_blocks(inode, cluster_idx) !=
1024                 F2FS_I(inode)->i_cluster_size;
1025 }
1026
1027 static bool cluster_may_compress(struct compress_ctx *cc)
1028 {
1029         if (!f2fs_need_compress_data(cc->inode))
1030                 return false;
1031         if (f2fs_is_atomic_file(cc->inode))
1032                 return false;
1033         if (!f2fs_cluster_is_full(cc))
1034                 return false;
1035         if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1036                 return false;
1037         return !cluster_has_invalid_data(cc);
1038 }
1039
1040 static void set_cluster_writeback(struct compress_ctx *cc)
1041 {
1042         int i;
1043
1044         for (i = 0; i < cc->cluster_size; i++) {
1045                 if (cc->rpages[i])
1046                         set_page_writeback(cc->rpages[i]);
1047         }
1048 }
1049
1050 static void cancel_cluster_writeback(struct compress_ctx *cc,
1051                         struct compress_io_ctx *cic, int submitted)
1052 {
1053         int i;
1054
1055         /* Wait for submitted IOs. */
1056         if (submitted > 1) {
1057                 f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
1058                 while (atomic_read(&cic->pending_pages) !=
1059                                         (cc->valid_nr_cpages - submitted + 1))
1060                         f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1061         }
1062
1063         /* Cancel writeback and stay locked. */
1064         for (i = 0; i < cc->cluster_size; i++) {
1065                 if (i < submitted) {
1066                         inode_inc_dirty_pages(cc->inode);
1067                         lock_page(cc->rpages[i]);
1068                 }
1069                 clear_page_private_gcing(cc->rpages[i]);
1070                 if (folio_test_writeback(page_folio(cc->rpages[i])))
1071                         end_page_writeback(cc->rpages[i]);
1072         }
1073 }
1074
1075 static void set_cluster_dirty(struct compress_ctx *cc)
1076 {
1077         int i;
1078
1079         for (i = 0; i < cc->cluster_size; i++)
1080                 if (cc->rpages[i]) {
1081                         set_page_dirty(cc->rpages[i]);
1082                         set_page_private_gcing(cc->rpages[i]);
1083                 }
1084 }
1085
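/*
 * Before part of a compressed cluster can be overwritten, every page of the
 * cluster must be present, uptodate and locked in the page cache: grab (or
 * create) the pages, read and decompress the cluster if any page was not
 * uptodate, then hand the locked rpages[] array back to the caller through
 * *fsdata.
 */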
1086 static int prepare_compress_overwrite(struct compress_ctx *cc,
1087                 struct page **pagep, pgoff_t index, void **fsdata)
1088 {
1089         struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1090         struct address_space *mapping = cc->inode->i_mapping;
1091         struct page *page;
1092         sector_t last_block_in_bio;
1093         fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1094         pgoff_t start_idx = start_idx_of_cluster(cc);
1095         int i, ret;
1096
1097 retry:
1098         ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1099         if (ret <= 0)
1100                 return ret;
1101
1102         ret = f2fs_init_compress_ctx(cc);
1103         if (ret)
1104                 return ret;
1105
1106         /* keep page reference to avoid page reclaim */
1107         for (i = 0; i < cc->cluster_size; i++) {
1108                 page = f2fs_pagecache_get_page(mapping, start_idx + i,
1109                                                         fgp_flag, GFP_NOFS);
1110                 if (!page) {
1111                         ret = -ENOMEM;
1112                         goto unlock_pages;
1113                 }
1114
1115                 if (PageUptodate(page))
1116                         f2fs_put_page(page, 1);
1117                 else
1118                         f2fs_compress_ctx_add_page(cc, page_folio(page));
1119         }
1120
1121         if (!f2fs_cluster_is_empty(cc)) {
1122                 struct bio *bio = NULL;
1123
1124                 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1125                                         &last_block_in_bio, NULL, true);
1126                 f2fs_put_rpages(cc);
1127                 f2fs_destroy_compress_ctx(cc, true);
1128                 if (ret)
1129                         goto out;
1130                 if (bio)
1131                         f2fs_submit_read_bio(sbi, bio, DATA);
1132
1133                 ret = f2fs_init_compress_ctx(cc);
1134                 if (ret)
1135                         goto out;
1136         }
1137
1138         for (i = 0; i < cc->cluster_size; i++) {
1139                 f2fs_bug_on(sbi, cc->rpages[i]);
1140
1141                 page = find_lock_page(mapping, start_idx + i);
1142                 if (!page) {
1143                         /* page can be truncated */
1144                         goto release_and_retry;
1145                 }
1146
1147                 f2fs_wait_on_page_writeback(page, DATA, true, true);
1148                 f2fs_compress_ctx_add_page(cc, page_folio(page));
1149
1150                 if (!PageUptodate(page)) {
1151 release_and_retry:
1152                         f2fs_put_rpages(cc);
1153                         f2fs_unlock_rpages(cc, i + 1);
1154                         f2fs_destroy_compress_ctx(cc, true);
1155                         goto retry;
1156                 }
1157         }
1158
1159         if (likely(!ret)) {
1160                 *fsdata = cc->rpages;
1161                 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1162                 return cc->cluster_size;
1163         }
1164
1165 unlock_pages:
1166         f2fs_put_rpages(cc);
1167         f2fs_unlock_rpages(cc, i);
1168         f2fs_destroy_compress_ctx(cc, true);
1169 out:
1170         return ret;
1171 }
1172
1173 int f2fs_prepare_compress_overwrite(struct inode *inode,
1174                 struct page **pagep, pgoff_t index, void **fsdata)
1175 {
1176         struct compress_ctx cc = {
1177                 .inode = inode,
1178                 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1179                 .cluster_size = F2FS_I(inode)->i_cluster_size,
1180                 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1181                 .rpages = NULL,
1182                 .nr_rpages = 0,
1183         };
1184
1185         return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1186 }
1187
1188 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1189                                         pgoff_t index, unsigned copied)
1190
1191 {
1192         struct compress_ctx cc = {
1193                 .inode = inode,
1194                 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1195                 .cluster_size = F2FS_I(inode)->i_cluster_size,
1196                 .rpages = fsdata,
1197         };
1198         bool first_index = (index == cc.rpages[0]->index);
1199
1200         if (copied)
1201                 set_cluster_dirty(&cc);
1202
1203         f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1204         f2fs_destroy_compress_ctx(&cc, false);
1205
1206         return first_index;
1207 }
1208
1209 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1210 {
1211         void *fsdata = NULL;
1212         struct page *pagep;
1213         int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1214         pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1215                                                         log_cluster_size;
1216         int err;
1217
1218         err = f2fs_is_compressed_cluster(inode, start_idx);
1219         if (err < 0)
1220                 return err;
1221
1222         /* truncate normal cluster */
1223         if (!err)
1224                 return f2fs_do_truncate_blocks(inode, from, lock);
1225
1226         /* truncate compressed cluster */
1227         err = f2fs_prepare_compress_overwrite(inode, &pagep,
1228                                                 start_idx, &fsdata);
1229
1230         /* should not be a normal cluster */
1231         f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1232
1233         if (err <= 0)
1234                 return err;
1235
1236         if (err > 0) {
1237                 struct page **rpages = fsdata;
1238                 int cluster_size = F2FS_I(inode)->i_cluster_size;
1239                 int i;
1240
1241                 for (i = cluster_size - 1; i >= 0; i--) {
1242                         loff_t start = rpages[i]->index << PAGE_SHIFT;
1243
1244                         if (from <= start) {
1245                                 zero_user_segment(rpages[i], 0, PAGE_SIZE);
1246                         } else {
1247                                 zero_user_segment(rpages[i], from - start,
1248                                                                 PAGE_SIZE);
1249                                 break;
1250                         }
1251                 }
1252
1253                 f2fs_compress_write_end(inode, fsdata, start_idx, true);
1254         }
1255         return 0;
1256 }
1257
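/*
 * Write back one compressed cluster: block 0 of the cluster is set to
 * COMPRESS_ADDR to mark the cluster header, the following valid_nr_cpages
 * blocks receive the compressed pages, and any remaining block addresses are
 * invalidated. On failure this returns -EAGAIN so that the caller falls back
 * to f2fs_write_raw_pages().
 */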
1258 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1259                                         int *submitted,
1260                                         struct writeback_control *wbc,
1261                                         enum iostat_type io_type)
1262 {
1263         struct inode *inode = cc->inode;
1264         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1265         struct f2fs_inode_info *fi = F2FS_I(inode);
1266         struct f2fs_io_info fio = {
1267                 .sbi = sbi,
1268                 .ino = cc->inode->i_ino,
1269                 .type = DATA,
1270                 .op = REQ_OP_WRITE,
1271                 .op_flags = wbc_to_write_flags(wbc),
1272                 .old_blkaddr = NEW_ADDR,
1273                 .page = NULL,
1274                 .encrypted_page = NULL,
1275                 .compressed_page = NULL,
1276                 .io_type = io_type,
1277                 .io_wbc = wbc,
1278                 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1279                                                                         1 : 0,
1280         };
1281         struct dnode_of_data dn;
1282         struct node_info ni;
1283         struct compress_io_ctx *cic;
1284         pgoff_t start_idx = start_idx_of_cluster(cc);
1285         unsigned int last_index = cc->cluster_size - 1;
1286         loff_t psize;
1287         int i, err;
1288         bool quota_inode = IS_NOQUOTA(inode);
1289
1290         /* bypass data pages so that the kworker jobs can proceed */
1291         if (unlikely(f2fs_cp_error(sbi))) {
1292                 mapping_set_error(cc->rpages[0]->mapping, -EIO);
1293                 goto out_free;
1294         }
1295
1296         if (quota_inode) {
1297                 /*
1298                  * We need to wait for node_write to avoid block allocation during
1299                  * checkpoint. This can only happen for quota writes, which can
1300                  * cause the discard race condition below.
1301                  */
1302                 f2fs_down_read(&sbi->node_write);
1303         } else if (!f2fs_trylock_op(sbi)) {
1304                 goto out_free;
1305         }
1306
1307         set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1308
1309         err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1310         if (err)
1311                 goto out_unlock_op;
1312
1313         for (i = 0; i < cc->cluster_size; i++) {
1314                 if (data_blkaddr(dn.inode, dn.node_page,
1315                                         dn.ofs_in_node + i) == NULL_ADDR)
1316                         goto out_put_dnode;
1317         }
1318
1319         psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1320
1321         err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1322         if (err)
1323                 goto out_put_dnode;
1324
1325         fio.version = ni.version;
1326
1327         cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1328         if (!cic)
1329                 goto out_put_dnode;
1330
1331         cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1332         cic->inode = inode;
1333         atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1334         cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1335         if (!cic->rpages)
1336                 goto out_put_cic;
1337
1338         cic->nr_rpages = cc->cluster_size;
1339
1340         for (i = 0; i < cc->valid_nr_cpages; i++) {
1341                 f2fs_set_compressed_page(cc->cpages[i], inode,
1342                                         cc->rpages[i + 1]->index, cic);
1343                 fio.compressed_page = cc->cpages[i];
1344
1345                 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1346                                                 dn.ofs_in_node + i + 1);
1347
1348                 /* wait for GCed page writeback via META_MAPPING */
1349                 f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1350
1351                 if (fio.encrypted) {
1352                         fio.page = cc->rpages[i + 1];
1353                         err = f2fs_encrypt_one_page(&fio);
1354                         if (err)
1355                                 goto out_destroy_crypt;
1356                         cc->cpages[i] = fio.encrypted_page;
1357                 }
1358         }
1359
1360         set_cluster_writeback(cc);
1361
1362         for (i = 0; i < cc->cluster_size; i++)
1363                 cic->rpages[i] = cc->rpages[i];
1364
1365         for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1366                 block_t blkaddr;
1367
1368                 blkaddr = f2fs_data_blkaddr(&dn);
1369                 fio.page = cc->rpages[i];
1370                 fio.old_blkaddr = blkaddr;
1371
1372                 /* cluster header */
1373                 if (i == 0) {
1374                         if (blkaddr == COMPRESS_ADDR)
1375                                 fio.compr_blocks++;
1376                         if (__is_valid_data_blkaddr(blkaddr))
1377                                 f2fs_invalidate_blocks(sbi, blkaddr);
1378                         f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1379                         goto unlock_continue;
1380                 }
1381
1382                 if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1383                         fio.compr_blocks++;
1384
1385                 if (i > cc->valid_nr_cpages) {
1386                         if (__is_valid_data_blkaddr(blkaddr)) {
1387                                 f2fs_invalidate_blocks(sbi, blkaddr);
1388                                 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1389                         }
1390                         goto unlock_continue;
1391                 }
1392
1393                 f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1394
1395                 if (fio.encrypted)
1396                         fio.encrypted_page = cc->cpages[i - 1];
1397                 else
1398                         fio.compressed_page = cc->cpages[i - 1];
1399
1400                 cc->cpages[i - 1] = NULL;
1401                 fio.submitted = 0;
1402                 f2fs_outplace_write_data(&dn, &fio);
1403                 if (unlikely(!fio.submitted)) {
1404                         cancel_cluster_writeback(cc, cic, i);
1405
1406                         /* To call fscrypt_finalize_bounce_page */
1407                         i = cc->valid_nr_cpages;
1408                         *submitted = 0;
1409                         goto out_destroy_crypt;
1410                 }
1411                 (*submitted)++;
1412 unlock_continue:
1413                 inode_dec_dirty_pages(cc->inode);
1414                 unlock_page(fio.page);
1415         }
1416
1417         if (fio.compr_blocks)
1418                 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1419         f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1420         add_compr_block_stat(inode, cc->valid_nr_cpages);
1421
1422         set_inode_flag(cc->inode, FI_APPEND_WRITE);
1423
1424         f2fs_put_dnode(&dn);
1425         if (quota_inode)
1426                 f2fs_up_read(&sbi->node_write);
1427         else
1428                 f2fs_unlock_op(sbi);
1429
1430         spin_lock(&fi->i_size_lock);
1431         if (fi->last_disk_size < psize)
1432                 fi->last_disk_size = psize;
1433         spin_unlock(&fi->i_size_lock);
1434
1435         f2fs_put_rpages(cc);
1436         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1437         cc->cpages = NULL;
1438         f2fs_destroy_compress_ctx(cc, false);
1439         return 0;
1440
1441 out_destroy_crypt:
1442         page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1443
1444         for (--i; i >= 0; i--) {
1445                 if (!cc->cpages[i])
1446                         continue;
1447                 fscrypt_finalize_bounce_page(&cc->cpages[i]);
1448         }
1449 out_put_cic:
1450         kmem_cache_free(cic_entry_slab, cic);
1451 out_put_dnode:
1452         f2fs_put_dnode(&dn);
1453 out_unlock_op:
1454         if (quota_inode)
1455                 f2fs_up_read(&sbi->node_write);
1456         else
1457                 f2fs_unlock_op(sbi);
1458 out_free:
1459         for (i = 0; i < cc->valid_nr_cpages; i++) {
1460                 f2fs_compress_free_page(cc->cpages[i]);
1461                 cc->cpages[i] = NULL;
1462         }
1463         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1464         cc->cpages = NULL;
1465         return -EAGAIN;
1466 }
1467
1468 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1469 {
1470         struct f2fs_sb_info *sbi = bio->bi_private;
1471         struct compress_io_ctx *cic =
1472                         (struct compress_io_ctx *)page_private(page);
1473         enum count_type type = WB_DATA_TYPE(page,
1474                                 f2fs_is_compressed_page(page));
1475         int i;
1476
1477         if (unlikely(bio->bi_status))
1478                 mapping_set_error(cic->inode->i_mapping, -EIO);
1479
1480         f2fs_compress_free_page(page);
1481
1482         dec_page_count(sbi, type);
1483
1484         if (atomic_dec_return(&cic->pending_pages))
1485                 return;
1486
1487         for (i = 0; i < cic->nr_rpages; i++) {
1488                 WARN_ON(!cic->rpages[i]);
1489                 clear_page_private_gcing(cic->rpages[i]);
1490                 end_page_writeback(cic->rpages[i]);
1491         }
1492
1493         page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1494         kmem_cache_free(cic_entry_slab, cic);
1495 }
1496
1497 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1498                                         int *submitted_p,
1499                                         struct writeback_control *wbc,
1500                                         enum iostat_type io_type)
1501 {
1502         struct address_space *mapping = cc->inode->i_mapping;
1503         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1504         int submitted, compr_blocks, i;
1505         int ret = 0;
1506
1507         compr_blocks = f2fs_compressed_blocks(cc);
1508
1509         for (i = 0; i < cc->cluster_size; i++) {
1510                 if (!cc->rpages[i])
1511                         continue;
1512
1513                 redirty_page_for_writepage(wbc, cc->rpages[i]);
1514                 unlock_page(cc->rpages[i]);
1515         }
1516
1517         if (compr_blocks < 0)
1518                 return compr_blocks;
1519
1520         /* overwrite compressed cluster w/ normal cluster */
1521         if (compr_blocks > 0)
1522                 f2fs_lock_op(sbi);
1523
1524         for (i = 0; i < cc->cluster_size; i++) {
1525                 if (!cc->rpages[i])
1526                         continue;
1527 retry_write:
1528                 lock_page(cc->rpages[i]);
1529
1530                 if (cc->rpages[i]->mapping != mapping) {
1531 continue_unlock:
1532                         unlock_page(cc->rpages[i]);
1533                         continue;
1534                 }
1535
1536                 if (!PageDirty(cc->rpages[i]))
1537                         goto continue_unlock;
1538
1539                 if (folio_test_writeback(page_folio(cc->rpages[i]))) {
1540                         if (wbc->sync_mode == WB_SYNC_NONE)
1541                                 goto continue_unlock;
1542                         f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
1543                 }
1544
1545                 if (!clear_page_dirty_for_io(cc->rpages[i]))
1546                         goto continue_unlock;
1547
1548                 ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
1549                                                 &submitted,
1550                                                 NULL, NULL, wbc, io_type,
1551                                                 compr_blocks, false);
1552                 if (ret) {
1553                         if (ret == AOP_WRITEPAGE_ACTIVATE) {
1554                                 unlock_page(cc->rpages[i]);
1555                                 ret = 0;
1556                         } else if (ret == -EAGAIN) {
1557                                 ret = 0;
1558                                 /*
1559                                  * For quota files, just redirty the remaining
1560                                  * pages to avoid a deadlock caused by a cluster
1561                                  * update race with foreground operations.
1562                                  */
1563                                 if (IS_NOQUOTA(cc->inode))
1564                                         goto out;
1565                                 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1566                                 goto retry_write;
1567                         }
1568                         goto out;
1569                 }
1570
1571                 *submitted_p += submitted;
1572         }
1573
1574 out:
1575         if (compr_blocks > 0)
1576                 f2fs_unlock_op(sbi);
1577
1578         f2fs_balance_fs(sbi, true);
1579         return ret;
1580 }
1581
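/*
 * Writeback entry point for a cluster.  If the cluster may be compressed,
 * try f2fs_compress_pages() and then f2fs_write_compressed_pages(); if
 * either step returns -EAGAIN, fall back to writing the raw pages.
 */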
1582 int f2fs_write_multi_pages(struct compress_ctx *cc,
1583                                         int *submitted,
1584                                         struct writeback_control *wbc,
1585                                         enum iostat_type io_type)
1586 {
1587         int err;
1588
1589         *submitted = 0;
1590         if (cluster_may_compress(cc)) {
1591                 err = f2fs_compress_pages(cc);
1592                 if (err == -EAGAIN) {
1593                         add_compr_block_stat(cc->inode, cc->cluster_size);
1594                         goto write;
1595                 } else if (err) {
1596                         f2fs_put_rpages_wbc(cc, wbc, true, 1);
1597                         goto destroy_out;
1598                 }
1599
1600                 err = f2fs_write_compressed_pages(cc, submitted,
1601                                                         wbc, io_type);
1602                 if (!err)
1603                         return 0;
1604                 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1605         }
1606 write:
1607         f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1608
1609         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1610         f2fs_put_rpages_wbc(cc, wbc, false, 0);
1611 destroy_out:
1612         f2fs_destroy_compress_ctx(cc, false);
1613         return err;
1614 }
1615
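/*
 * Decompression buffers are allocated up front (pre_alloc) in normal mode,
 * but only at decompression time in low-memory mode.  This helper reports
 * whether allocation is allowed at the current stage.
 */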
1616 static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1617                 bool pre_alloc)
1618 {
1619         return pre_alloc ^ f2fs_low_mem_mode(sbi);
1620 }
1621
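/*
 * Allocate the memory needed for in-place decompression: temporary pages
 * (tpages) for cluster slots that have no pagecache page, virtually
 * contiguous mappings of the destination pages (rbuf) and of the compressed
 * pages (cbuf), plus any algorithm-specific context.
 */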
1622 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1623                 bool pre_alloc)
1624 {
1625         const struct f2fs_compress_ops *cops =
1626                 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1627         int i;
1628
1629         if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1630                 return 0;
1631
1632         dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1633         if (!dic->tpages)
1634                 return -ENOMEM;
1635
1636         for (i = 0; i < dic->cluster_size; i++) {
1637                 if (dic->rpages[i]) {
1638                         dic->tpages[i] = dic->rpages[i];
1639                         continue;
1640                 }
1641
1642                 dic->tpages[i] = f2fs_compress_alloc_page();
1643         }
1644
1645         dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1646         if (!dic->rbuf)
1647                 return -ENOMEM;
1648
1649         dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1650         if (!dic->cbuf)
1651                 return -ENOMEM;
1652
1653         if (cops->init_decompress_ctx)
1654                 return cops->init_decompress_ctx(dic);
1655
1656         return 0;
1657 }
1658
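/*
 * Undo f2fs_prepare_decomp_mem(): destroy the algorithm-specific context
 * (unless bypassed) and unmap the cbuf/rbuf virtual mappings.
 */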
1659 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1660                 bool bypass_destroy_callback, bool pre_alloc)
1661 {
1662         const struct f2fs_compress_ops *cops =
1663                 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1664
1665         if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1666                 return;
1667
1668         if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1669                 cops->destroy_decompress_ctx(dic);
1670
1671         if (dic->cbuf)
1672                 vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1673
1674         if (dic->rbuf)
1675                 vm_unmap_ram(dic->rbuf, dic->cluster_size);
1676 }
1677
1678 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1679                 bool bypass_destroy_callback);
1680
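/*
 * Build a decompress_io_ctx for reading one compressed cluster: record the
 * cluster geometry, copy the caller's pagecache pages (rpages), and allocate
 * pages (cpages) to receive the compressed data from disk, each marked as a
 * compressed page so the read completion path can find the dic again.
 */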
1681 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1682 {
1683         struct decompress_io_ctx *dic;
1684         pgoff_t start_idx = start_idx_of_cluster(cc);
1685         struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1686         int i, ret;
1687
1688         dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1689         if (!dic)
1690                 return ERR_PTR(-ENOMEM);
1691
1692         dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1693         if (!dic->rpages) {
1694                 kmem_cache_free(dic_entry_slab, dic);
1695                 return ERR_PTR(-ENOMEM);
1696         }
1697
1698         dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1699         dic->inode = cc->inode;
1700         atomic_set(&dic->remaining_pages, cc->nr_cpages);
1701         dic->cluster_idx = cc->cluster_idx;
1702         dic->cluster_size = cc->cluster_size;
1703         dic->log_cluster_size = cc->log_cluster_size;
1704         dic->nr_cpages = cc->nr_cpages;
1705         refcount_set(&dic->refcnt, 1);
1706         dic->failed = false;
1707         dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1708
1709         for (i = 0; i < dic->cluster_size; i++)
1710                 dic->rpages[i] = cc->rpages[i];
1711         dic->nr_rpages = cc->cluster_size;
1712
1713         dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1714         if (!dic->cpages) {
1715                 ret = -ENOMEM;
1716                 goto out_free;
1717         }
1718
1719         for (i = 0; i < dic->nr_cpages; i++) {
1720                 struct page *page;
1721
1722                 page = f2fs_compress_alloc_page();
1723                 f2fs_set_compressed_page(page, cc->inode,
1724                                         start_idx + i + 1, dic);
1725                 dic->cpages[i] = page;
1726         }
1727
1728         ret = f2fs_prepare_decomp_mem(dic, true);
1729         if (ret)
1730                 goto out_free;
1731
1732         return dic;
1733
1734 out_free:
1735         f2fs_free_dic(dic, true);
1736         return ERR_PTR(ret);
1737 }
1738
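/*
 * Release everything owned by a decompress_io_ctx: the decompression
 * buffers, any temporary pages not shared with rpages, the compressed
 * pages, the page pointer arrays and finally the dic itself.
 */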
1739 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1740                 bool bypass_destroy_callback)
1741 {
1742         int i;
1743
1744         f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1745
1746         if (dic->tpages) {
1747                 for (i = 0; i < dic->cluster_size; i++) {
1748                         if (dic->rpages[i])
1749                                 continue;
1750                         if (!dic->tpages[i])
1751                                 continue;
1752                         f2fs_compress_free_page(dic->tpages[i]);
1753                 }
1754                 page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1755         }
1756
1757         if (dic->cpages) {
1758                 for (i = 0; i < dic->nr_cpages; i++) {
1759                         if (!dic->cpages[i])
1760                                 continue;
1761                         f2fs_compress_free_page(dic->cpages[i]);
1762                 }
1763                 page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1764         }
1765
1766         page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1767         kmem_cache_free(dic_entry_slab, dic);
1768 }
1769
1770 static void f2fs_late_free_dic(struct work_struct *work)
1771 {
1772         struct decompress_io_ctx *dic =
1773                 container_of(work, struct decompress_io_ctx, free_work);
1774
1775         f2fs_free_dic(dic, false);
1776 }
1777
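/*
 * Drop a reference to the dic.  The final put frees it directly when called
 * from task context; otherwise freeing is deferred to the post-read
 * workqueue, since it involves work that cannot be done in interrupt context.
 */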
1778 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1779 {
1780         if (refcount_dec_and_test(&dic->refcnt)) {
1781                 if (in_task) {
1782                         f2fs_free_dic(dic, false);
1783                 } else {
1784                         INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1785                         queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1786                                         &dic->free_work);
1787                 }
1788         }
1789 }
1790
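/*
 * fs-verity work function: verify each decompressed pagecache page, mark it
 * uptodate (or not) accordingly, unlock it, and drop the dic reference that
 * was held across I/O completion.
 */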
1791 static void f2fs_verify_cluster(struct work_struct *work)
1792 {
1793         struct decompress_io_ctx *dic =
1794                 container_of(work, struct decompress_io_ctx, verity_work);
1795         int i;
1796
1797         /* Verify, update, and unlock the decompressed pages. */
1798         for (i = 0; i < dic->cluster_size; i++) {
1799                 struct page *rpage = dic->rpages[i];
1800
1801                 if (!rpage)
1802                         continue;
1803
1804                 if (fsverity_verify_page(rpage))
1805                         SetPageUptodate(rpage);
1806                 else
1807                         ClearPageUptodate(rpage);
1808                 unlock_page(rpage);
1809         }
1810
1811         f2fs_put_dic(dic, true);
1812 }
1813
1814 /*
1815  * This is called when a compressed cluster has been decompressed
1816  * (or failed to be read and/or decompressed).
1817  */
1818 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1819                                 bool in_task)
1820 {
1821         int i;
1822
1823         if (!failed && dic->need_verity) {
1824                 /*
1825                  * Note that to avoid deadlocks, the verity work can't be done
1826                  * on the decompression workqueue.  This is because verifying
1827                  * the data pages can involve reading metadata pages from the
1828                  * file, and these metadata pages may be compressed.
1829                  */
1830                 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1831                 fsverity_enqueue_verify_work(&dic->verity_work);
1832                 return;
1833         }
1834
1835         /* Update and unlock the cluster's pagecache pages. */
1836         for (i = 0; i < dic->cluster_size; i++) {
1837                 struct page *rpage = dic->rpages[i];
1838
1839                 if (!rpage)
1840                         continue;
1841
1842                 if (failed)
1843                         ClearPageUptodate(rpage);
1844                 else
1845                         SetPageUptodate(rpage);
1846                 unlock_page(rpage);
1847         }
1848
1849         /*
1850          * Release the reference to the decompress_io_ctx that was being held
1851          * for I/O completion.
1852          */
1853         f2fs_put_dic(dic, in_task);
1854 }
1855
1856 /*
1857  * Put a reference to a compressed page's decompress_io_ctx.
1858  *
1859  * This is called when the page is no longer needed and can be freed.
1860  */
1861 void f2fs_put_page_dic(struct page *page, bool in_task)
1862 {
1863         struct decompress_io_ctx *dic =
1864                         (struct decompress_io_ctx *)page_private(page);
1865
1866         f2fs_put_dic(dic, in_task);
1867 }
1868
1869 /*
1870  * Check whether the cluster's blocks are contiguous; an extent cache entry
1871  * is added only when they are both logically and physically contiguous.
1872  */
1873 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
1874                                                 unsigned int ofs_in_node)
1875 {
1876         bool compressed = data_blkaddr(dn->inode, dn->node_page,
1877                                         ofs_in_node) == COMPRESS_ADDR;
1878         int i = compressed ? 1 : 0;
1879         block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1880                                                         ofs_in_node + i);
1881
1882         for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1883                 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1884                                                         ofs_in_node + i);
1885
1886                 if (!__is_valid_data_blkaddr(blkaddr))
1887                         break;
1888                 if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1889                         return 0;
1890         }
1891
1892         return compressed ? i - 1 : i;
1893 }
1894
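/*
 * The per-superblock compress inode acts as a cache of on-disk compressed
 * blocks, indexed by block address; these are its address_space operations
 * and the helper that returns its mapping.
 */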
1895 const struct address_space_operations f2fs_compress_aops = {
1896         .release_folio = f2fs_release_folio,
1897         .invalidate_folio = f2fs_invalidate_folio,
1898         .migrate_folio  = filemap_migrate_folio,
1899 };
1900
1901 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1902 {
1903         return sbi->compress_inode->i_mapping;
1904 }
1905
1906 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1907 {
1908         if (!sbi->compress_inode)
1909                 return;
1910         invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1911 }
1912
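/*
 * With the compress_cache mount option, keep a copy of a compressed block in
 * the compress inode's page cache, indexed by blkaddr and tagged with the
 * owning inode number, so a later read of the same block can skip the disk.
 */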
1913 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1914                                                 nid_t ino, block_t blkaddr)
1915 {
1916         struct page *cpage;
1917         int ret;
1918
1919         if (!test_opt(sbi, COMPRESS_CACHE))
1920                 return;
1921
1922         if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1923                 return;
1924
1925         if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1926                 return;
1927
1928         cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1929         if (cpage) {
1930                 f2fs_put_page(cpage, 0);
1931                 return;
1932         }
1933
1934         cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1935         if (!cpage)
1936                 return;
1937
1938         ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1939                                                 blkaddr, GFP_NOFS);
1940         if (ret) {
1941                 f2fs_put_page(cpage, 0);
1942                 return;
1943         }
1944
1945         set_page_private_data(cpage, ino);
1946
1947         memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1948         SetPageUptodate(cpage);
1949         f2fs_put_page(cpage, 1);
1950 }
1951
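/*
 * Look up a cached copy of the compressed block at @blkaddr; on a hit, copy
 * it into @page, count the hit and return true.
 */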
1952 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1953                                                                 block_t blkaddr)
1954 {
1955         struct page *cpage;
1956         bool hitted = false;
1957
1958         if (!test_opt(sbi, COMPRESS_CACHE))
1959                 return false;
1960
1961         cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1962                                 blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1963         if (cpage) {
1964                 if (PageUptodate(cpage)) {
1965                         atomic_inc(&sbi->compress_page_hit);
1966                         memcpy(page_address(page),
1967                                 page_address(cpage), PAGE_SIZE);
1968                         hitted = true;
1969                 }
1970                 f2fs_put_page(cpage, 1);
1971         }
1972
1973         return hitted;
1974 }
1975
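/*
 * Drop every cached compressed block that belongs to inode @ino, walking the
 * compress inode's mapping in folio batches.
 */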
1976 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1977 {
1978         struct address_space *mapping = COMPRESS_MAPPING(sbi);
1979         struct folio_batch fbatch;
1980         pgoff_t index = 0;
1981         pgoff_t end = MAX_BLKADDR(sbi);
1982
1983         if (!mapping->nrpages)
1984                 return;
1985
1986         folio_batch_init(&fbatch);
1987
1988         do {
1989                 unsigned int nr, i;
1990
1991                 nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
1992                 if (!nr)
1993                         break;
1994
1995                 for (i = 0; i < nr; i++) {
1996                         struct folio *folio = fbatch.folios[i];
1997
1998                         folio_lock(folio);
1999                         if (folio->mapping != mapping) {
2000                                 folio_unlock(folio);
2001                                 continue;
2002                         }
2003
2004                         if (ino != get_page_private_data(&folio->page)) {
2005                                 folio_unlock(folio);
2006                                 continue;
2007                         }
2008
2009                         generic_error_remove_folio(mapping, folio);
2010                         folio_unlock(folio);
2011                 }
2012                 folio_batch_release(&fbatch);
2013                 cond_resched();
2014         } while (index < end);
2015 }
2016
2017 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
2018 {
2019         struct inode *inode;
2020
2021         if (!test_opt(sbi, COMPRESS_CACHE))
2022                 return 0;
2023
2024         inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
2025         if (IS_ERR(inode))
2026                 return PTR_ERR(inode);
2027         sbi->compress_inode = inode;
2028
2029         sbi->compress_percent = COMPRESS_PERCENT;
2030         sbi->compress_watermark = COMPRESS_WATERMARK;
2031
2032         atomic_set(&sbi->compress_page_hit, 0);
2033
2034         return 0;
2035 }
2036
2037 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
2038 {
2039         if (!sbi->compress_inode)
2040                 return;
2041         iput(sbi->compress_inode);
2042         sbi->compress_inode = NULL;
2043 }
2044
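/*
 * Create the per-device slab used by page_array_alloc() for page pointer
 * arrays up to one cluster in size; larger requests fall back to kmalloc.
 */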
2045 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
2046 {
2047         dev_t dev = sbi->sb->s_bdev->bd_dev;
2048         char slab_name[35];
2049
2050         if (!f2fs_sb_has_compression(sbi))
2051                 return 0;
2052
2053         sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
2054
2055         sbi->page_array_slab_size = sizeof(struct page *) <<
2056                                         F2FS_OPTION(sbi).compress_log_size;
2057
2058         sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
2059                                         sbi->page_array_slab_size);
2060         return sbi->page_array_slab ? 0 : -ENOMEM;
2061 }
2062
2063 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
2064 {
2065         kmem_cache_destroy(sbi->page_array_slab);
2066 }
2067
2068 int __init f2fs_init_compress_cache(void)
2069 {
2070         cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
2071                                         sizeof(struct compress_io_ctx));
2072         if (!cic_entry_slab)
2073                 return -ENOMEM;
2074         dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
2075                                         sizeof(struct decompress_io_ctx));
2076         if (!dic_entry_slab)
2077                 goto free_cic;
2078         return 0;
2079 free_cic:
2080         kmem_cache_destroy(cic_entry_slab);
2081         return -ENOMEM;
2082 }
2083
2084 void f2fs_destroy_compress_cache(void)
2085 {
2086         kmem_cache_destroy(dic_entry_slab);
2087         kmem_cache_destroy(cic_entry_slab);
2088 }