/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
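
/*
 * Illustrative sizing, assuming 4KiB sectors and the 4-byte CRC32C
 * checksums btrfs uses at this point: a 128KiB compressed extent gets
 * room for 32 inline checksums, i.e. sizeof(struct compressed_bio) +
 * 32 * 4 bytes.  Note that check_compressed_csum() below consumes one
 * checksum per compressed page, which matches this allocation when
 * sectorsize == PAGE_SIZE.
 */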
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip the checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
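	/*
	 * The last argument below is the hook's "uptodate" flag; BLK_STS_OK
	 * is 0 and BLK_STS_NOTSUPP is 1, so the names are reused purely for
	 * their numeric values: an error status passes 0 (not uptodate),
	 * success passes 1.
	 */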
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
					   unsigned long len, u64 disk_start,
					   unsigned long compressed_len,
					   struct page **compressed_pages,
					   unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							      PAGE_SIZE,
							      bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't run before
			 * we've incremented it. Otherwise, the cb might
			 * get freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}
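
/*
 * Return the file offset just past the last byte currently in @bio, i.e.
 * the point from which add_ra_bio_pages() continues appending readahead
 * pages.
 */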
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
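
/*
 * Add contiguous pages from the page cache to cb->orig_bio so that a
 * single decompression pass can also fill readahead pages covered by
 * this compressed extent.  Gives up after several pages that are already
 * cached, or as soon as a page no longer maps to this extent on disk.
 */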
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
					  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't run before
			 * we've incremented it. Otherwise, the cb might
			 * get freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)
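
/*
 * In other words, 16 consecutive bytes are copied out of every 256-byte
 * stride, so roughly 1/16 of the scanned range ends up in the sample.
 */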

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contains the maximum number of symbols, which is 256, we
 * obtain a sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
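
/*
 * BTRFS_MAX_UNCOMPRESSED is 128KiB, so this works out to
 * 131072 * 16 / 256 = 8192 bytes, matching the 8KB bound described above.
 */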

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};
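
/* One workspace list per compression type, indexed by compression type - 1 */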
static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until there is one.
 * Preallocation guarantees forward progress, so we do not return errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is in bits 0-3
 * - the level is in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
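/*
 * Illustrative encoding, following the bit layout described above: zlib
 * is compression type 1, so zlib at level 9 would be passed as
 * type_level = (9 << 4) | 1.
 */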
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our working buffer within
 * the uncompressed data.
 *
 * total_out is the end offset (exclusive) of the data in the working buffer
 */
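/*
 * For example (illustrative): once the decompressor has produced a second
 * 4KiB chunk into the working buffer, buf_start is 4096 and total_out is
 * 8192, i.e. the buffer holds uncompressed bytes [4096, 8192).
 */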
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 1;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	__free_workspace(0, ws_list, true);

	return ret;
}
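
/*
 * Illustrative behaviour: btrfs_compress_str2level("zlib:3") returns 3,
 * while plain "zlib" or any unrecognized string returns 0 (the
 * algorithm's default level).
 */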
unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return 0;
}