// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
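
/*
 * The two slabs above back the per-cluster I/O contexts used throughout this
 * file: a compress_io_ctx ("cic") tracks a cluster's pages while its
 * compressed form is being written out, and a decompress_io_ctx ("dic")
 * tracks them while compressed blocks are read back in and decompressed.
 */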
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
					GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
	bool (*is_level_valid)(int level);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}
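
/*
 * Worked example for the helpers above (a sketch, assuming a 16-page
 * cluster, i.e. log_cluster_size == 4): page index 35 sits at offset
 * 35 & 15 == 3 within cluster 35 >> 4 == 2, and that cluster starts
 * back at page index 2 << 4 == 32.
 */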
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	struct folio *folio = page_folio(page);

	folio_attach_private(folio, (void *)data);

	/* i_crypto_info and iv index */
	folio->index = index;
	folio->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->valid_nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, folio->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, folio->index);
	cc->rpages[cluster_ofs] = folio_page(folio, 0);
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, folio->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"lzo compress failed, ret:%d", ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lzo decompress failed, ret:%d", ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lzo invalid rlen:%zu, expected:%lu",
				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_level)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not change cc->clen to LZ4_compressBound(inputsize) to
	 * adapt worst compress case, because lz4 compressor can handle
	 * output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len = -EINVAL;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	if (!level)
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
#ifdef CONFIG_F2FS_FS_LZ4HC
	else
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
#endif
	if (len < 0)
		return len;
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
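
/*
 * Note on the output budget above: LZ4_compress_default()/LZ4_compress_HC()
 * return 0 when the result does not fit the budget set up in
 * lz4_init_compress_ctx(), which is one page less than the cluster size.
 * A cluster is only worth storing compressed if it saves at least one
 * block, so that case is reported as -EAGAIN and the cluster is written raw.
 */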
static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lz4 decompress failed, ret:%d", ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lz4 invalid ret:%d, expected:%lu",
				ret, PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static bool lz4_is_level_valid(int lvl)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
#else
	return lvl == 0;
#endif
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
	.is_level_valid		= lz4_is_level_valid,
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	zstd_parameters params;
	zstd_cstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	/* Need to remain this for backward compatibility */
	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = zstd_get_params(level, cc->rlen);
	workspace_size = zstd_cstream_workspace_bound(&params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
	if (!stream) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_init_cstream failed", __func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	zstd_cstream *stream = cc->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_compress_stream failed, ret: %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	ret = zstd_end_stream(stream, &outbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_end_stream returned %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	/*
	 * there is compressed data remained in intermediate buffer due to
	 * no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = zstd_dstream_workspace_bound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
	if (!stream) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"%s zstd_init_dstream failed", __func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream = dic->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"%s zstd_decompress_stream failed, ret: %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"%s ZSTD invalid rlen:%zu, expected:%lu",
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static bool zstd_is_level_valid(int lvl)
{
	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
	.is_level_valid		= zstd_is_level_valid,
};
#endif
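
/*
 * Unlike the one-shot LZO/LZ4 calls, the zstd backend above drives the
 * kernel's streaming zstd API against a caller-provided workspace, sized up
 * front with the *_workspace_bound() helpers, so the library itself does not
 * allocate memory while a cluster is in flight.
 */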
#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"lzo-rle compress failed, ret:%d", ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

bool f2fs_is_compress_level_valid(int alg, int lvl)
{
	const struct f2fs_compress_ops *cops = f2fs_cops[alg];

	if (cops->is_level_valid)
		return cops->is_level_valid(lvl);

	return lvl == 0;
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int __init f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	return compress_page_pool ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}
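
/*
 * vm_map_ram() can fail transiently when vmalloc space is fragmented, so
 * f2fs_vmap() below retries a few times, flushing lazily-freed vmap aliases
 * in between, before giving up on mapping the cluster contiguously.
 */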
#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
	cc->valid_nr_cpages = cc->nr_cpages;

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++)
		cc->cpages[i] = f2fs_compress_alloc_page();

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->valid_nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
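
/*
 * Decompression counterpart: f2fs_decompress_cluster() below runs once the
 * last compressed page of a cluster has landed (see
 * f2fs_end_read_compressed_page()), validates the on-disk header and
 * optional checksum, and hands the result to f2fs_decompress_end_io().
 */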
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc);

void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	bool bypass_callback = false;
	int ret;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	ret = f2fs_prepare_decomp_mem(dic, false);
	if (ret) {
		bypass_callback = true;
		goto out_release;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;

		/* Avoid f2fs_commit_super in irq context */
		if (!in_task)
			f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
		else
			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
		goto out_release;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				f2fs_info_ratelimited(sbi,
					"checksum invalid, nid = %lu, %x vs %x",
					dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_release:
	f2fs_release_decomp_mem(dic, bypass_callback, false);

out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret, in_task);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
		block_t blkaddr, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr && in_task)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic, in_task);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate)
{
	unsigned long pgidx = pages[index]->index;
	int i = uptodate ? 0 : 1;

	/*
	 * when uptodate set to true, try to check all pages in cluster is
	 * uptodate or not.
	 */
	if (uptodate && (pgidx % cc->cluster_size))
		return false;

	if (nr_pages - index < cc->cluster_size)
		return false;

	for (; i < cc->cluster_size; i++) {
		if (pages[index + i]->index != pgidx + i)
			return false;
		if (uptodate && !PageUptodate(pages[index + i]))
			return false;
	}

	return true;
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page_folio(page)->index >= nr_pages)
			return true;
	}
	return false;
}
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	int cluster_end = 0;
	unsigned int count;
	int i;
	char *reason = "";

	if (dn->data_blkaddr != COMPRESS_ADDR)
		return false;

	/* [..., COMPR_ADDR, ...] */
	if (dn->ofs_in_node % cluster_size) {
		reason = "[*|C|*|*]";
		goto out;
	}

	for (i = 1, count = 1; i < cluster_size; i++, count++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
							dn->ofs_in_node + i);

		/* [COMPR_ADDR, ..., COMPR_ADDR] */
		if (blkaddr == COMPRESS_ADDR) {
			reason = "[C|*|C|*]";
			goto out;
		}
		if (!__is_valid_data_blkaddr(blkaddr)) {
			if (!cluster_end)
				cluster_end = i;
			continue;
		}
		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
		if (cluster_end) {
			reason = "[C|N|N|V]";
			goto out;
		}
	}

	f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
		!is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));

	return false;
out:
	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return true;
#else
	return false;
#endif
}

static int __f2fs_get_cluster_blocks(struct inode *inode,
					struct dnode_of_data *dn)
{
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, i;

	for (i = 0, count = 0; i < cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
							dn->ofs_in_node + i);

		if (__is_valid_data_blkaddr(blkaddr))
			count++;
	}

	return count;
}

static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
				enum cluster_check_type type)
{
	struct dnode_of_data dn;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (f2fs_sanity_check_cluster(&dn)) {
		ret = -EFSCORRUPTED;
		f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		if (type == CLUSTER_COMPR_BLKS)
			ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
		else if (type == CLUSTER_IS_COMPR)
			ret = 1;
	} else if (type == CLUSTER_RAW_BLKS) {
		ret = __f2fs_get_cluster_blocks(inode, &dn);
	}

	f2fs_put_dnode(&dn);
fail:
	return ret;
}
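
/*
 * The three cluster_check_type queries share the single walk above:
 * CLUSTER_IS_COMPR answers "is this a compressed cluster" (0 or 1),
 * CLUSTER_COMPR_BLKS counts the header plus compressed blocks, and
 * CLUSTER_RAW_BLKS counts valid blocks in a non-compressed cluster.
 * The wrappers below each pin one of these modes.
 */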
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
		CLUSTER_COMPR_BLKS);
}

/* return # of raw blocks in non-compressed cluster */
static int f2fs_decompressed_blocks(struct inode *inode,
				unsigned int cluster_idx)
{
	return __f2fs_cluster_blocks(inode, cluster_idx,
		CLUSTER_RAW_BLKS);
}

/* return whether cluster is compressed one or not */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		CLUSTER_IS_COMPR);
}

/* return whether cluster contains non raw blocks or not */
bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
{
	unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;

	return f2fs_decompressed_blocks(inode, cluster_idx) !=
		F2FS_I(inode)->i_cluster_size;
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void cancel_cluster_writeback(struct compress_ctx *cc,
			struct compress_io_ctx *cic, int submitted)
{
	int i;

	/* Wait for submitted IOs. */
	if (submitted > 1) {
		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
		while (atomic_read(&cic->pending_pages) !=
					(cc->valid_nr_cpages - submitted + 1))
			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	}

	/* Cancel writeback and stay locked. */
	for (i = 0; i < cc->cluster_size; i++) {
		if (i < submitted) {
			inode_inc_dirty_pages(cc->inode);
			lock_page(cc->rpages[i]);
		}
		clear_page_private_gcing(cc->rpages[i]);
		if (folio_test_writeback(page_folio(cc->rpages[i])))
			end_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i]) {
			set_page_dirty(cc->rpages[i]);
			set_page_private_gcing(cc->rpages[i]);
		}
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page_folio(page));
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, NULL, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_read_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page_folio(page));

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)

{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}
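
/*
 * Example of the start_idx rounding below (a sketch, assuming 4KB pages and
 * a 16-page cluster, i.e. log_cluster_size == 4): for from == 70000,
 * start_idx = 70000 >> (12 + 4) << 4 == 16, the first page index of the
 * cluster containing the truncation point.
 */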
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}
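
/*
 * Write out one compressed cluster.  On disk the cluster's first block slot
 * is set to the COMPRESS_ADDR marker, the next valid_nr_cpages slots point
 * at the compressed payload, and any remaining slots are reset to NEW_ADDR;
 * cc->cpages[i] therefore lands at ofs_in_node i + 1.
 */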
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
									1 : 0,
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;
	bool quota_inode = IS_NOQUOTA(inode);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (quota_inode) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		f2fs_down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->valid_nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		if (unlikely(!fio.submitted)) {
			cancel_cluster_writeback(cc, cic, i);

			/* To call fscrypt_finalize_bounce_page */
			i = cc->valid_nr_cpages;
			*submitted = 0;
			goto out_destroy_crypt;
		}
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
	add_compr_block_stat(inode, cc->valid_nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);

	f2fs_put_dnode(&dn);
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--) {
		if (!cc->cpages[i])
			continue;
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	enum count_type type = WB_DATA_TYPE(page,
				f2fs_is_compressed_page(page));
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, type);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted_p,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int submitted, compr_blocks, i;
	int ret = 0;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	/* overwrite compressed cluster w/ normal cluster */
	if (compr_blocks > 0)
		f2fs_lock_op(sbi);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (folio_test_writeback(page_folio(cc->rpages[i]))) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
		}

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
						&submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				ret = 0;
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode))
					goto out;
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			goto out;
		}

		*submitted_p += submitted;
	}

out:
	if (compr_blocks > 0)
		f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, true);
	return ret;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			add_compr_block_stat(cc->inode, cc->cluster_size);
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}
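
/*
 * Decompression scratch memory is either preallocated when the dic is set
 * up or allocated on demand at decompress time.  The XOR below selects
 * exactly one of the two: in low-memory mode only the on-demand pass
 * (pre_alloc == false) allocates; otherwise only the preallocation pass does.
 */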
static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
		bool pre_alloc)
{
	return pre_alloc ^ f2fs_low_mem_mode(sbi);
}

static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
	int i;

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return 0;

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages)
		return -ENOMEM;

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf)
		return -ENOMEM;

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf)
		return -ENOMEM;

	if (cops->init_decompress_ctx)
		return cops->init_decompress_ctx(dic);

	return 0;
}

static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return;

	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);

	if (dic->cbuf)
		vm_unmap_ram(dic->cbuf, dic->nr_cpages);

	if (dic->rbuf)
		vm_unmap_ram(dic->rbuf, dic->cluster_size);
}
static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	int i, ret;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	ret = f2fs_prepare_decomp_mem(dic, true);
	if (ret)
		goto out_free;

	return dic;

out_free:
	f2fs_free_dic(dic, true);
	return ERR_PTR(ret);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback)
{
	int i;

	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_late_free_dic(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, free_work);

	f2fs_free_dic(dic, false);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
{
	if (refcount_dec_and_test(&dic->refcnt)) {
		if (in_task) {
			f2fs_free_dic(dic, false);
		} else {
			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
					&dic->free_work);
		}
	}
}
static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify, update, and unlock the decompressed pages. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		if (fsverity_verify_page(rpage))
			SetPageUptodate(rpage);
		else
			ClearPageUptodate(rpage);
		unlock_page(rpage);
	}

	f2fs_put_dic(dic, true);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	int i;

	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
		return;
	}

	/* Update and unlock the cluster's pagecache pages. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		if (failed)
			ClearPageUptodate(rpage);
		else
			SetPageUptodate(rpage);
		unlock_page(rpage);
	}

	/*
	 * Release the reference to the decompress_io_ctx that was being held
	 * for I/O completion.
	 */
	f2fs_put_dic(dic, in_task);
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic, in_task);
}
/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
						unsigned int ofs_in_node)
{
	bool compressed = data_blkaddr(dn->inode, dn->node_page,
					ofs_in_node) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}
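
/*
 * Worked example for the function above: a compressed cluster mapped as
 * [COMPRESS_ADDR, 100, 101, 102] yields 3.  A hole simply ends the count
 * early, while a physically non-contiguous block returns 0 so that no
 * extent cache entry is added.
 */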
const struct address_space_operations f2fs_compress_aops = {
	.release_folio = f2fs_release_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
	f2fs_put_page(cpage, 1);
}
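
/*
 * The compress cache above is indexed by block address, with the owning
 * inode number stashed in the page private data so that
 * f2fs_invalidate_compress_pages() can later drop a dying inode's entries.
 */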
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hitted = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hitted;
}

void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = COMPRESS_MAPPING(sbi);
	struct folio_batch fbatch;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	folio_batch_init(&fbatch);

	do {
		unsigned int nr, i;

		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
		if (!nr)
			break;

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			if (folio->mapping != mapping) {
				folio_unlock(folio);
				continue;
			}

			if (ino != get_page_private_data(&folio->page)) {
				folio_unlock(folio);
				continue;
			}

			generic_error_remove_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	} while (index < end);
}
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[35];

	if (!f2fs_sb_has_compression(sbi))
		return 0;

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	return sbi->page_array_slab ? 0 : -ENOMEM;
}
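
/*
 * Slab sizing example (a sketch, assuming 64-bit pointers and the default
 * compress_log_size of 2, i.e. 4-page clusters): each slab object is
 * 8 << 2 == 32 bytes, one page pointer per cluster page; larger requests
 * fall back to f2fs_kzalloc() in page_array_alloc().
 */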
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

int __init f2fs_init_compress_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		goto free_cic;
	return 0;
free_cic:
	kmem_cache_destroy(cic_entry_slab);
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
	kmem_cache_destroy(cic_entry_slab);
}