// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For inlined LZO compressed extents, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, thus it's possible
 *     to have at most 3 padding zeros at the end of a sector.
 *
 * 2.2 Data payload
 *     Variable size. Its maximum size is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example layout with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */

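/*
 * Worked example of the layout above (illustrative numbers only): an 8KiB
 * write with a 4KiB sectorsize whose two sectors compress to 100 and 200
 * bytes respectively is stored as:
 *
 *   Header     = LE32 value 312 (4 + (4 + 100) + (4 + 200), includes itself)
 *   Segment 1  = SegHdr (LE32 value 100) + 100 bytes of payload
 *   Segment 2  = SegHdr (LE32 value 200) + 200 bytes of payload
 *
 * After segment 1 the output offset is 108, leaving 3988 bytes before the
 * next sector boundary, which is >= LZO_LEN, so no padding zeros are needed.
 */
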
#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))

struct workspace {
	void *mem;	/* working memory for lzo1x_1_compress() */
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL);
	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}

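/*
 * The length fields described in the format comment are stored little endian
 * on disk.  For example (illustrative value), a total length of 312 (0x138)
 * bytes is written as the byte sequence 38 01 00 00 regardless of host
 * endianness, which is why the helpers above go through
 * cpu_to_le32()/le32_to_cpu().
 */
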
/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header
 *   If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header crossing sector boundary, previous
	 * run should ensure we have enough space left inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	/* Write the segment header (compressed length of this segment) */
	kaddr = kmap_local_page(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap_local(kaddr);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap_local_page(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0, sector_bytes_left);
	*cur_out += sector_bytes_left;
out:
	kunmap_local(kaddr);
	return 0;
}

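/*
 * Compress the range [@start, @start + *@total_out) of @mapping one sector
 * at a time, appending each result as a segment via
 * copy_compressed_data_to_page().
 *
 * On entry *@out_pages is the maximum number of output pages allowed, on
 * return it is the number of pages actually used.  Returns -E2BIG if the
 * output needs more pages than that, or if the output is still larger than
 * the input after more than two sectors have been compressed.  On success
 * the total compressed size is written into the extent header at the start
 * of the first page.
 */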
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap_local_page(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap_local(data_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we're making it bigger after two sectors.  And if
		 * so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			/* Drop the reference to the finished input page */
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * For the payload there will be no padding, just need to do page switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		memcpy_from_page(dest + *cur_in - orig_in, cur_page,
				 offset_in_page(*cur_in), copy_len);

		*cur_in += copy_len;
	}
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret = 0;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap_local_page(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap_local(kaddr);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If this happens, it means the compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap_local_page(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap_local(kaddr);
		cur_in += LZO_LEN;

		if (seg_len > WORKSPACE_CBUF_LENGTH) {
			/*
			 * seg_len shouldn't be larger than we have allocated
			 * for workspace->cbuf
			 */
			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
				  seg_len);
			ret = -EIO;
			goto out;
		}

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

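/*
 * Decompress a single-segment extent (e.g. an inlined extent, which the
 * format comment above limits to one segment) directly from @data_in.
 *
 * Both the extent header and the segment header are validated against
 * @srclen before decompressing into the workspace buffer; up to @destlen
 * bytes starting at @start_byte of the decompressed data are then copied
 * into @dest_page, and any remainder of the @destlen range is zero filled.
 */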
int lzo_decompress(struct list_head *ws, const u8 *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	/* The first LZO_LEN bytes hold the total compressed length */
	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	/* The second LZO_LEN bytes hold the single segment's payload length */
	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * the caller is already checking against PAGE_SIZE, but let's
	 * move this check closer to the memcpy/memset
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too,
	 * but this will cover anything missing from the decompressed
	 * data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};