// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
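
/*
 * An LZ4 match may reference data up to LZ4_DISTANCE_MAX (65,535) bytes
 * behind the current position.  Since that window rarely starts on a page
 * boundary, one extra page is kept on top of the page-rounded distance:
 * with 4KiB pages, DIV_ROUND_UP(65535, 4096) + 1 = 17 pages.
 */
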
struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}
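
/*
 * Note that the LZ4 parameters come either from a full z_erofs_lz4_cfgs
 * record (images with the COMPR_CFGS feature, see z_erofs_parse_cfgs()
 * below) or, for older images, from the legacy u1.lz4_max_distance field
 * alone; z_erofs_gbuf_growsize() then pre-grows the global per-CPU buffers
 * so they can hold one maximally-sized pcluster for the copy fallback.
 */
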
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate
 * compression ratios (CR).
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
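
/*
 * Return convention used by z_erofs_lz4_decompress() below: < 0 on error,
 * 1 if all output pages turned out to be physically consecutive (so the
 * whole buffer can be addressed via page_address() of the first page),
 * and 0 if a virtually contiguous mapping still has to be built with
 * erofs_vm_map_ram().
 */
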
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i)
			if (rq->out[ctx->outpages - ctx->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = z_erofs_get_gbuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
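
/*
 * *maptype values produced above, matched by the cleanup at the end of
 * z_erofs_lz4_decompress_mem(): 0 - the single input page stays kmapped;
 * 1 - the input pages were mapped with erofs_vm_map_ram(); 2 - compressed
 * data was copied into a per-CPU gbuf; 3 - in-place decompression, nothing
 * extra to unmap.
 */
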
/*
 * Get the exact inputsize with zero_padding feature.
 *  - For LZ4, it should work if zero_padding feature is on (5.3+);
 *  - For MicroLZMA, it'd be enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}
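
/*
 * Worked example (4KiB block assumed for illustration): a 3000-byte
 * compressed stream stored with zero_padding is right-aligned within its
 * block, so the first 1096 bytes are zeroes; memchr_inv() skips them and
 * inputsize/pageofs_in are adjusted by those 1096 bytes accordingly.
 */
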
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *dst)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}
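
/*
 * dst_maptype above mirrors the input-side maptype: 0 - kmapped fast path
 * for single-page pclusters, 1 - physically consecutive output addressed
 * directly through page_address(), 2 - vm_map_ram()-backed mapping that is
 * torn down once decompression finishes.
 */
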
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1]) {
				memmove(kin + rq->pageofs_out, kin + pi, cur);
				flush_dcache_page(rq->out[0]);
			} else {
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			}
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni]) {
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
				flush_dcache_page(rq->out[no]);
			} else if (rq->out[no]) {
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			}
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}
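
/*
 * Reading of the INTERLACED path above: interlaced plain data begins at
 * pageofs_out within its block and wraps around to the block start, which
 * is why the head `cur' bytes of output are fetched from the tail of the
 * last input page before the sequential copy loop takes over.
 */
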
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
			       void **src, struct page **pgpl)
{
	struct z_erofs_decompress_req *rq = dctx->rq;
	struct super_block *sb = rq->sb;
	struct page **pgo, *tmppage;
	unsigned int j;

	if (!dctx->avail_out) {
		if (++dctx->no >= dctx->outpages || !rq->outputsize) {
			erofs_err(sb, "insufficient space for decompressed data");
			return -EFSCORRUPTED;
		}

		if (dctx->kout)
			kunmap_local(dctx->kout);
		dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
		rq->outputsize -= dctx->avail_out;
		pgo = &rq->out[dctx->no];
		if (!*pgo && rq->fillgaps) {	/* deduped */
			*pgo = erofs_allocpage(pgpl, rq->gfp);
			if (!*pgo) {
				dctx->kout = NULL;
				return -ENOMEM;
			}
			set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
		}
		if (*pgo) {
			dctx->kout = kmap_local_page(*pgo);
			*dst = dctx->kout + rq->pageofs_out;
		} else {
			*dst = dctx->kout = NULL;
		}
		rq->pageofs_out = 0;
	}

	if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
		if (++dctx->ni >= dctx->inpages) {
			erofs_err(sb, "invalid compressed data");
			return -EFSCORRUPTED;
		}
		if (dctx->kout) /* unlike kmap(), take care of the orders */
			kunmap_local(dctx->kout);
		kunmap_local(dctx->kin);

		dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
		rq->inputsize -= dctx->inbuf_sz;
		dctx->kin = kmap_local_page(rq->in[dctx->ni]);
		*src = dctx->kin;
		dctx->bounced = false;
		if (dctx->kout) {
			j = (u8 *)*dst - dctx->kout;
			dctx->kout = kmap_local_page(rq->out[dctx->no]);
			*dst = dctx->kout + j;
		}
		dctx->inbuf_pos = 0;
	}

	/*
	 * Handle overlapping: Use the given bounce buffer if the input data is
	 * under processing; Or utilize short-lived pages from the on-stack page
	 * pool, where pages are shared among the same request. Note that only
	 * a few inplace I/O pages need to be doubled.
	 */
	if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
		memcpy(dctx->bounce, *src, dctx->inbuf_sz);
		*src = dctx->bounce;
		dctx->bounced = true;
	}

	for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
		if (rq->out[dctx->no] != rq->in[j])
			continue;
		tmppage = erofs_allocpage(pgpl, rq->gfp);
		if (!tmppage)
			return -ENOMEM;
		set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
		copy_highpage(tmppage, rq->in[j]);
		rq->in[j] = tmppage;
	}
	return 0;
}
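
/*
 * z_erofs_stream_switch_bufs() is the shared buffer-switching helper for
 * the streaming decompressors (the LZMA/DEFLATE/ZSTD backends referenced
 * below): it advances to the next output/input page, allocates short-lived
 * pages for deduped gaps, and bounces input that inplace I/O would
 * otherwise overwrite.
 */
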
const struct z_erofs_decompressor *z_erofs_decomp[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.init = z_erofs_gbuf_init,
		.exit = z_erofs_gbuf_exit,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
	[Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
			ret = dec->config(sb, dsb, data, size);
		} else {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}

int __init z_erofs_init_decompressor(void)
{
	int i, err;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
		err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
		if (err) {
			while (i--)
				if (z_erofs_decomp[i])
					z_erofs_decomp[i]->exit();
			return err;
		}
	}
	return 0;
}

void z_erofs_exit_decompressor(void)
{
	int i;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
		if (z_erofs_decomp[i])
			z_erofs_decomp[i]->exit();
}