// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

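/*
 * Every compress/decompress context carries a cluster-sized array of page
 * pointers. Arrays that fit the per-superblock slab (sized from the mount's
 * cluster size) are served from it; oversized requests fall back to
 * f2fs_kzalloc()/kvfree().
 */
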
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}

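/*
 * Per-algorithm hooks. The init/destroy pairs are optional: LZO and LZ4
 * need no decompression context, so only the zstd backend provides the
 * decompress init/destroy callbacks.
 */
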
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

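/*
 * A compressed page is recognized via its page_private() payload: both
 * compress_io_ctx and decompress_io_ctx carry F2FS_COMPRESSED_PAGE_MAGIC,
 * which is what the f2fs_bug_on() below verifies once atomic/dummy-written
 * pages have been ruled out.
 */
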
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		/* drop both the lookup reference and the one being released */
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

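/*
 * Algorithm backends. Each one compresses the vmapped raw cluster
 * (cc->rbuf, cc->rlen bytes) into cc->cbuf->cdata and reports the payload
 * size through cc->clen; decompression goes the other way around.
 */
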
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) for the
	 * worst case, because the lz4 compressor handles the output budget
	 * properly on its own.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

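/*
 * A per-inode level stored in the upper bits of i_compress_flag takes
 * precedence; F2FS_ZSTD_DEFAULT_CLEVEL is used only when no level was set.
 */
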
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * A non-zero return from ZSTD_endStream() means compressed data
	 * remains in an intermediate buffer because there is no more space
	 * in cbuf->cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

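/*
 * For decompression the window never exceeds the cluster size, so the
 * workspace bound can be computed once from log_cluster_size via
 * MAX_COMPRESS_WINDOW_SIZE().
 */
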
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

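/*
 * Intermediate pages for compressed data (cc->cpages/dic->cpages) come from
 * this mempool rather than the page allocator, so compressed-cluster I/O can
 * keep making forward progress under memory pressure.
 */
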
int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

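/*
 * vm_map_ram() can fail transiently when vmap space is exhausted; retry a
 * few times, flushing lazily-freed vmap aliases between attempts.
 */
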
#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

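/*
 * Decompress one cluster. dic->tpages mirrors the cluster layout: slots
 * that have a pagecache page use it directly, and holes are plugged with
 * temporary mempool pages so the full cluster can be vmapped in one piece.
 */
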
static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx, ret);
	f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
	f2fs_put_rpages_mapping(mapping, start_idx, i);
out:
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

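/*
 * Write back one cluster in compressed form: block 0 of the cluster is
 * re-addressed as the COMPRESS_ADDR marker, the next nr_cpages blocks
 * receive the compressed payload, and any remaining blocks of the cluster
 * are invalidated and left as NEW_ADDR holes.
 */
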
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

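/*
 * Fallback path: write the cluster's pages one by one, uncompressed.
 * compr_blocks passes down how many compressed blocks the cluster
 * currently occupies on disk.
 */
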
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

static void f2fs_free_dic(struct decompress_io_ctx *dic);

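/*
 * A decompress_io_ctx starts with a single reference; f2fs_free_dic() runs
 * only once every holder has dropped its reference via f2fs_put_dic().
 */
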
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

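/*
 * Slab setup: a per-superblock cache for cluster-sized page-pointer arrays,
 * plus global caches for the compress (cic) and decompress (dic) contexts.
 */
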
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}