linux.git: fs/erofs/decompressor.c (Linux 6.14-rc3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

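/*
 * Worst-case number of pages the maximum LZ4 match distance can span
 * (+1 since the window is not necessarily page-aligned).
 */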
#define LZ4_MAX_DISTANCE_PAGES  (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)

struct z_erofs_lz4_decompress_ctx {
        struct z_erofs_decompress_req *rq;
        /* # of encoded, decoded pages */
        unsigned int inpages, outpages;
        /* decoded block total length (used for in-place decompression) */
        unsigned int oend;
};

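/*
 * Parse the on-disk LZ4 configuration (if any), validate the maximum
 * pcluster size, and derive the per-sb LZ4 match window size in pages.
 */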
static int z_erofs_load_lz4_config(struct super_block *sb,
                            struct erofs_super_block *dsb, void *data, int size)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct z_erofs_lz4_cfgs *lz4 = data;
        u16 distance;

        if (lz4) {
                if (size < sizeof(struct z_erofs_lz4_cfgs)) {
                        erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
                        return -EINVAL;
                }
                distance = le16_to_cpu(lz4->max_distance);

                sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
                if (!sbi->lz4.max_pclusterblks) {
                        sbi->lz4.max_pclusterblks = 1;  /* reserved case */
                } else if (sbi->lz4.max_pclusterblks >
                           erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
                        erofs_err(sb, "too large lz4 pclusterblks %u",
                                  sbi->lz4.max_pclusterblks);
                        return -EINVAL;
                }
        } else {
                distance = le16_to_cpu(dsb->u1.lz4_max_distance);
                sbi->lz4.max_pclusterblks = 1;
        }

        sbi->lz4.max_distance_pages = distance ?
                                        DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
                                        LZ4_MAX_DISTANCE_PAGES;
        return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check
 * whether all physical pages are consecutive, which is common at moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
                                        struct page **pagepool)
{
        struct z_erofs_decompress_req *rq = ctx->rq;
        struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
        unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
                                           BITS_PER_LONG)] = { 0 };
        unsigned int lz4_max_distance_pages =
                                EROFS_SB(rq->sb)->lz4.max_distance_pages;
        void *kaddr = NULL;
        unsigned int i, j, top;

        top = 0;
        for (i = j = 0; i < ctx->outpages; ++i, ++j) {
                struct page *const page = rq->out[i];
                struct page *victim;

                if (j >= lz4_max_distance_pages)
                        j = 0;

                /* 'valid' bounced can only be tested after a complete round */
                if (!rq->fillgaps && test_bit(j, bounced)) {
                        DBG_BUGON(i < lz4_max_distance_pages);
                        DBG_BUGON(top >= lz4_max_distance_pages);
                        availables[top++] = rq->out[i - lz4_max_distance_pages];
                }

                if (page) {
                        __clear_bit(j, bounced);
                        if (!PageHighMem(page)) {
                                if (!i) {
                                        kaddr = page_address(page);
                                        continue;
                                }
                                if (kaddr &&
                                    kaddr + PAGE_SIZE == page_address(page)) {
                                        kaddr += PAGE_SIZE;
                                        continue;
                                }
                        }
                        kaddr = NULL;
                        continue;
                }
                kaddr = NULL;
                __set_bit(j, bounced);

                if (top) {
                        victim = availables[--top];
                } else {
                        victim = __erofs_allocpage(pagepool, rq->gfp, true);
                        if (!victim)
                                return -ENOMEM;
                        set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
                }
                rq->out[i] = victim;
        }
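        /* return 1 if all output pages are physically consecutive */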
        return kaddr ? 1 : 0;
}

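/*
 * Map the compressed input for decompression.  *maptype encodes how the
 * returned buffer was obtained: 0 - single kmapped page, 1 - vm_map_ram'ed
 * area, 2 - copied into a per-CPU buffer, 3 - decompressed in place.
 */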
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
                        void *inpage, void *out, unsigned int *inputmargin,
                        int *maptype, bool may_inplace)
{
        struct z_erofs_decompress_req *rq = ctx->rq;
        unsigned int omargin, total, i;
        struct page **in;
        void *src, *tmp;

        if (rq->inplace_io) {
                omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
                if (rq->partial_decoding || !may_inplace ||
                    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
                        goto docopy;

                for (i = 0; i < ctx->inpages; ++i)
                        if (rq->out[ctx->outpages - ctx->inpages + i] !=
                            rq->in[i])
                                goto docopy;
                kunmap_local(inpage);
                *maptype = 3;
                return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
        }

        if (ctx->inpages <= 1) {
                *maptype = 0;
                return inpage;
        }
        kunmap_local(inpage);
        src = erofs_vm_map_ram(rq->in, ctx->inpages);
        if (!src)
                return ERR_PTR(-ENOMEM);
        *maptype = 1;
        return src;

docopy:
        /*
         * Otherwise, copy compressed data that may overlap with the output
         * into a per-CPU buffer.
         */
        in = rq->in;
        src = z_erofs_get_gbuf(ctx->inpages);
        if (!src) {
                DBG_BUGON(1);
                kunmap_local(inpage);
                return ERR_PTR(-EFAULT);
        }

        tmp = src;
        total = rq->inputsize;
        while (total) {
                unsigned int page_copycnt =
                        min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

                if (!inpage)
                        inpage = kmap_local_page(*in);
                memcpy(tmp, inpage + *inputmargin, page_copycnt);
                kunmap_local(inpage);
                inpage = NULL;
                tmp += page_copycnt;
                total -= page_copycnt;
                ++in;
                *inputmargin = 0;
        }
        *maptype = 2;
        return src;
}

/*
 * Get the exact input size with the zero_padding feature.
 *  - For LZ4, it works if the zero_padding feature is on (Linux 5.3+);
 *  - For MicroLZMA, it is always enabled.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
                         unsigned int padbufsize)
{
        const char *padend;

        padend = memchr_inv(padbuf, 0, padbufsize);
        if (!padend)
                return -EFSCORRUPTED;
        rq->inputsize -= padend - padbuf;
        rq->pageofs_in += padend - padbuf;
        return 0;
}

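/*
 * Decompress the whole pcluster into the virtually contiguous buffer @dst;
 * the input mapping strategy is chosen by z_erofs_lz4_handle_overlap().
 */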
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
                                      u8 *dst)
{
        struct z_erofs_decompress_req *rq = ctx->rq;
        bool support_0padding = false, may_inplace = false;
        unsigned int inputmargin;
        u8 *out, *headpage, *src;
        int ret, maptype;

        DBG_BUGON(*rq->in == NULL);
        headpage = kmap_local_page(*rq->in);

        /* LZ4 decompression inplace is only safe if zero_padding is enabled */
        if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
                support_0padding = true;
                ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
                                min_t(unsigned int, rq->inputsize,
                                      rq->sb->s_blocksize - rq->pageofs_in));
                if (ret) {
                        kunmap_local(headpage);
                        return ret;
                }
                may_inplace = !((rq->pageofs_in + rq->inputsize) &
                                (rq->sb->s_blocksize - 1));
        }

        inputmargin = rq->pageofs_in;
        src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
                                         &maptype, may_inplace);
        if (IS_ERR(src))
                return PTR_ERR(src);

        out = dst + rq->pageofs_out;
        /* legacy format could compress extra data in a pcluster. */
        if (rq->partial_decoding || !support_0padding)
                ret = LZ4_decompress_safe_partial(src + inputmargin, out,
                                rq->inputsize, rq->outputsize, rq->outputsize);
        else
                ret = LZ4_decompress_safe(src + inputmargin, out,
                                          rq->inputsize, rq->outputsize);

        if (ret != rq->outputsize) {
                erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
                          ret, rq->inputsize, inputmargin, rq->outputsize);
                if (ret >= 0)
                        memset(out + ret, 0, rq->outputsize - ret);
                ret = -EFSCORRUPTED;
        } else {
                ret = 0;
        }

        if (maptype == 0) {
                kunmap_local(headpage);
        } else if (maptype == 1) {
                vm_unmap_ram(src, ctx->inpages);
        } else if (maptype == 2) {
                z_erofs_put_gbuf(src);
        } else if (maptype != 3) {
                DBG_BUGON(1);
                return -EFAULT;
        }
        return ret;
}

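/*
 * Entry point for LZ4 pclusters: set up a virtually contiguous destination
 * (direct kmap, page_address() or vm_map_ram()) and decompress into it.
 */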
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
                                  struct page **pagepool)
{
        struct z_erofs_lz4_decompress_ctx ctx;
        unsigned int dst_maptype;
        void *dst;
        int ret;

        ctx.rq = rq;
        ctx.oend = rq->pageofs_out + rq->outputsize;
        ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
        ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

        /* one optimized fast path, currently for non-bigpcluster cases only */
        if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
                DBG_BUGON(!*rq->out);
                dst = kmap_local_page(*rq->out);
                dst_maptype = 0;
                goto dstmap_out;
        }

        /* general decoding path which can be used for all cases */
        ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
        if (ret < 0) {
                return ret;
        } else if (ret > 0) {
                dst = page_address(*rq->out);
                dst_maptype = 1;
        } else {
                dst = erofs_vm_map_ram(rq->out, ctx.outpages);
                if (!dst)
                        return -ENOMEM;
                dst_maptype = 2;
        }

dstmap_out:
        ret = z_erofs_lz4_decompress_mem(&ctx, dst);
        if (!dst_maptype)
                kunmap_local(dst);
        else if (dst_maptype == 2)
                vm_unmap_ram(dst, ctx.outpages);
        return ret;
}

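/*
 * Handle uncompressed ("shifted"/"interlaced") pclusters: data is stored
 * as-is and only needs to be moved or copied to its decoded position.
 */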
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
                                   struct page **pagepool)
{
        const unsigned int nrpages_in =
                PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
        const unsigned int bs = rq->sb->s_blocksize;
        unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
        u8 *kin;

        if (rq->outputsize > rq->inputsize)
                return -EOPNOTSUPP;
        if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
                cur = bs - (rq->pageofs_out & (bs - 1));
                pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
                cur = min(cur, rq->outputsize);
                if (cur && rq->out[0]) {
                        kin = kmap_local_page(rq->in[nrpages_in - 1]);
                        if (rq->out[0] == rq->in[nrpages_in - 1]) {
                                memmove(kin + rq->pageofs_out, kin + pi, cur);
                                flush_dcache_page(rq->out[0]);
                        } else {
                                memcpy_to_page(rq->out[0], rq->pageofs_out,
                                               kin + pi, cur);
                        }
                        kunmap_local(kin);
                }
                rq->outputsize -= cur;
        }

        for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
                insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
                rq->outputsize -= insz;
                if (!rq->in[ni])
                        continue;
                kin = kmap_local_page(rq->in[ni]);
                pi = 0;
                do {
                        no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
                        po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
                        DBG_BUGON(no >= nrpages_out);
                        cnt = min(insz - pi, PAGE_SIZE - po);
                        if (rq->out[no] == rq->in[ni]) {
                                memmove(kin + po,
                                        kin + rq->pageofs_in + pi, cnt);
                                flush_dcache_page(rq->out[no]);
                        } else if (rq->out[no]) {
                                memcpy_to_page(rq->out[no], po,
                                               kin + rq->pageofs_in + pi, cnt);
                        }
                        pi += cnt;
                } while (pi < insz);
                kunmap_local(kin);
        }
        DBG_BUGON(ni > nrpages_in);
        return 0;
}

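/*
 * Advance the output and/or input windows of a streaming decompressor
 * (LZMA, DEFLATE, ZSTD) to the next page, allocating short-lived pages
 * for gaps and duplicating in-place I/O pages that would be overwritten.
 */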
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
                               void **src, struct page **pgpl)
{
        struct z_erofs_decompress_req *rq = dctx->rq;
        struct super_block *sb = rq->sb;
        struct page **pgo, *tmppage;
        unsigned int j;

        if (!dctx->avail_out) {
                if (++dctx->no >= dctx->outpages || !rq->outputsize) {
                        erofs_err(sb, "insufficient space for decompressed data");
                        return -EFSCORRUPTED;
                }

                if (dctx->kout)
                        kunmap_local(dctx->kout);
                dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
                rq->outputsize -= dctx->avail_out;
                pgo = &rq->out[dctx->no];
                if (!*pgo && rq->fillgaps) {            /* deduped */
                        *pgo = erofs_allocpage(pgpl, rq->gfp);
                        if (!*pgo) {
                                dctx->kout = NULL;
                                return -ENOMEM;
                        }
                        set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
                }
                if (*pgo) {
                        dctx->kout = kmap_local_page(*pgo);
                        *dst = dctx->kout + rq->pageofs_out;
                } else {
                        *dst = dctx->kout = NULL;
                }
                rq->pageofs_out = 0;
        }

        if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
                if (++dctx->ni >= dctx->inpages) {
                        erofs_err(sb, "invalid compressed data");
                        return -EFSCORRUPTED;
                }
                /* unlike kmap(), kmap_local() mappings must pop in reverse order */
                if (dctx->kout)
                        kunmap_local(dctx->kout);
                kunmap_local(dctx->kin);

                dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
                rq->inputsize -= dctx->inbuf_sz;
                dctx->kin = kmap_local_page(rq->in[dctx->ni]);
                *src = dctx->kin;
                dctx->bounced = false;
                if (dctx->kout) {
                        j = (u8 *)*dst - dctx->kout;
                        dctx->kout = kmap_local_page(rq->out[dctx->no]);
                        *dst = dctx->kout + j;
                }
                dctx->inbuf_pos = 0;
        }

        /*
         * Handle overlapping: use the given bounce buffer if the input data
         * is still being processed; otherwise, utilize short-lived pages from
         * the on-stack page pool, which are shared within the same request.
         * Note that only a few inplace I/O pages need to be doubled.
         */
        if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
                memcpy(dctx->bounce, *src, dctx->inbuf_sz);
                *src = dctx->bounce;
                dctx->bounced = true;
        }

        for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
                if (rq->out[dctx->no] != rq->in[j])
                        continue;
                tmppage = erofs_allocpage(pgpl, rq->gfp);
                if (!tmppage)
                        return -ENOMEM;
                set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                copy_highpage(tmppage, rq->in[j]);
                rq->in[j] = tmppage;
        }
        return 0;
}

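/* decompressors indexed by the Z_EROFS_COMPRESSION_* algorithm number */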
const struct z_erofs_decompressor *z_erofs_decomp[] = {
        [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
                .decompress = z_erofs_transform_plain,
                .name = "shifted"
        },
        [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
                .decompress = z_erofs_transform_plain,
                .name = "interlaced"
        },
        [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
                .config = z_erofs_load_lz4_config,
                .decompress = z_erofs_lz4_decompress,
                .init = z_erofs_gbuf_init,
                .exit = z_erofs_gbuf_exit,
                .name = "lz4"
        },
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
        [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
        [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
        [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

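/*
 * Parse the per-algorithm compression configurations at mount time; fall
 * back to the legacy LZ4-only superblock fields if COMPR_CFGS is absent.
 */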
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        unsigned int algs, alg;
        erofs_off_t offset;
        int size, ret = 0;

        if (!erofs_sb_has_compr_cfgs(sbi)) {
                sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
                return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
        }

        sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
        if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
                erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
                          sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
                return -EOPNOTSUPP;
        }

        erofs_init_metabuf(&buf, sb);
        offset = EROFS_SUPER_OFFSET + sbi->sb_size;
        alg = 0;
        for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
                const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
                void *data;

                if (!(algs & 1))
                        continue;

                data = erofs_read_metadata(sb, &buf, &offset, &size);
                if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        break;
                }

                if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
                        ret = dec->config(sb, dsb, data, size);
                } else {
                        erofs_err(sb, "algorithm %d isn't enabled on this kernel",
                                  alg);
                        ret = -EOPNOTSUPP;
                }
                kfree(data);
                if (ret)
                        break;
        }
        erofs_put_metabuf(&buf);
        return ret;
}

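/*
 * Initialize all enabled decompression algorithms; on failure, tear down
 * the ones already initialized.
 */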
int __init z_erofs_init_decompressor(void)
{
        int i, err;

        for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
                err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
                if (err) {
                        while (i--)
                                if (z_erofs_decomp[i])
                                        z_erofs_decomp[i]->exit();
                        return err;
                }
        }
        return 0;
}

void z_erofs_exit_decompressor(void)
{
        int i;

        for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
                if (z_erofs_decomp[i])
                        z_erofs_decomp[i]->exit();
}