/* linux.git: fs/btrfs/extent_io.c */

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio));
	if (!btrfs_bioset)
		goto free_buffer_cache;
	return 0;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}

void extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

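/*
 * Usage sketch (editor's illustration, not part of the original source):
 * an io_tree is typically initialized once per inode against that
 * inode's address_space, along the lines of:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *
 * after which the tree tracks per-byte-range state for that inode's data.
 */
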
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->tree);
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

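/*
 * Worked example (editor's note, not from the original source): with
 * states [0,4095] and [8192,12287] in the tree, tree_search(tree, 6000)
 * finds no exact hit, so __etree_search() returns NULL and tree_search()
 * hands back the precomputed 'prev' pointer instead: the node for
 * [8192,12287], the first state ending at or after the offset.
 */
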
static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}
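
/*
 * Worked example (editor's note, not from the original source): if the
 * tree holds [0,4095] and a new state [4096,8191] is inserted with an
 * identical ->state word, the ranges are contiguous
 * (other->end == state->start - 1), so merge_state() frees one node and
 * leaves a single [0,8191] extent.  Locked or boundary extents
 * (EXTENT_IOBITS | EXTENT_BOUNDARY) are never merged.
 */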

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			unsigned long *bits)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
		     (unsigned long long)end,
		     (unsigned long long)start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}
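
/*
 * Worked example (editor's note, not from the original source): splitting
 * orig = [0,8191] at split = 4096 rewrites the two structs so that
 *
 *	prealloc: [0,4095]
 *	orig:     [4096,8191]
 *
 * Only 'prealloc' needs a fresh tree_insert(); 'orig' keeps its node
 * because its 'end' offset, the key the rbtree is indexed on, is
 * unchanged.
 */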

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned long *bits, int wake)
{
	struct extent_state *next;
	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
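
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * most callers drop a bit across a byte range and let the tree split and
 * merge as needed, mirroring the clear_extent_bits() wrapper below:
 *
 *	clear_extent_bit(tree, start, end, EXTENT_DELALLOC,
 *			 0, 0, NULL, GFP_NOFS);
 *
 * wake == 1 additionally kicks any waiters on the range, and delete == 1
 * clears every bit regardless of 'bits' (the truncate case above).
 */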

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long *bits)
{
	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned long bits, unsigned long exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned long bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:		the io tree to search
 * @start:		the start offset in bytes
 * @end:		the end offset in bytes (inclusive)
 * @bits:		the bits to set in this range
 * @clear_bits:		the bits to clear in this range
 * @cached_state:	state that we're going to cache
 * @mask:		the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned long bits, unsigned long clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		if (err)
			extent_io_tree_panic(tree, err);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
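
/*
 * Usage sketch (editor's illustration, not part of the original source;
 * the bit values here are hypothetical for this call site): a caller
 * atomically trades one mergeable bit for another over a range:
 *
 *	convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
 *			   EXTENT_DIRTY, &cached_state, GFP_NOFS);
 *
 * sets EXTENT_NEED_WAIT and clears EXTENT_DIRTY on every state in
 * [start, end] in a single pass under the tree lock.
 */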

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    unsigned long bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      unsigned long bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, mask);
}

int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
		      struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

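/*
 * Usage sketch (editor's illustration, not part of the original source):
 * the delalloc wrapper above is what a buffered-write style path would
 * call to tag a freshly dirtied range, e.g. (hypothetical caller):
 *
 *	set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
 *			    &cached_state, GFP_NOFS);
 *
 * which sets EXTENT_DELALLOC | EXTENT_UPTODATE on [start, end] in one
 * call.
 */
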
/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, struct extent_state **cached_state)
{
	int err;
	u64 failed_start;
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, 0, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}
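
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * the lock/unlock pair brackets I/O on a byte range; a cached state can
 * be threaded through to skip the tree search on unlock, as
 * find_lock_delalloc_range() below does:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached);
 *	... do I/O against [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 */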

int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		account_page_redirty(page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree,
			    u64 start, unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set.  Zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned; if something was found, 0.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned long bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	struct rb_node *n;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && state->tree) {
			n = rb_next(&state->rb_node);
			while (n) {
				state = rb_entry(n, struct extent_state,
						 rb_node);
				if (state->state & bits)
					goto got_it;
				n = rb_next(n);
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state(state, cached_state);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
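
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * walking every range with a given bit set, front to back:
 *
 *	u64 found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */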

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);	/* Only valid values are 0 and -EAGAIN */

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}

int extent_clear_unlock_delalloc(struct inode *inode,
				 struct extent_io_tree *tree,
				 u64 start, u64 end, struct page *locked_page,
				 unsigned long op)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	unsigned long clear_bits = 0;

	if (op & EXTENT_CLEAR_UNLOCK)
		clear_bits |= EXTENT_LOCKED;
	if (op & EXTENT_CLEAR_DIRTY)
		clear_bits |= EXTENT_DIRTY;

	if (op & EXTENT_CLEAR_DELALLOC)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
		    EXTENT_SET_PRIVATE2)))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (op & EXTENT_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (op & EXTENT_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (op & EXTENT_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (op & EXTENT_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
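
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a whole-tree dirty-byte query hits the cached fast path described
 * above, since cur_start == 0 and bits == EXTENT_DIRTY:
 *
 *	u64 off = 0;
 *	u64 dirty = count_range_bits(tree, &off, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY, 0);
 */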

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}
1809
e4100d98
MX
1810void extent_cache_csums_dio(struct extent_io_tree *tree, u64 start, u32 csums[],
1811 int count)
1812{
1813 struct rb_node *node;
1814 struct extent_state *state;
1815
1816 spin_lock(&tree->lock);
1817 /*
1818 * this search will find all the extents that end after
1819 * our range starts.
1820 */
1821 node = tree_search(tree, start);
1822 BUG_ON(!node);
1823
1824 state = rb_entry(node, struct extent_state, rb_node);
1825 BUG_ON(state->start != start);
1826
1827 while (count) {
1828 state->private = *csums++;
1829 count--;
1830 state = next_state(state);
1831 }
1832 spin_unlock(&tree->lock);
1833}
1834
1835static inline u64 __btrfs_get_bio_offset(struct bio *bio, int bio_index)
1836{
1837 struct bio_vec *bvec = bio->bi_io_vec + bio_index;
1838
1839 return page_offset(bvec->bv_page) + bvec->bv_offset;
1840}
1841
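/*
 * editorial note: same as extent_cache_csums_dio() above, but the state
 * offsets come from the bio's pages: each bio_vec from bio_index onward
 * names the file offset whose extent_state receives the next csum.
 */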
1842void extent_cache_csums(struct extent_io_tree *tree, struct bio *bio, int bio_index,
1843 u32 csums[], int count)
1844{
1845 struct rb_node *node;
1846 struct extent_state *state = NULL;
1847 u64 start;
1848
1849 spin_lock(&tree->lock);
1850 do {
1851 start = __btrfs_get_bio_offset(bio, bio_index);
1852 if (state == NULL || state->start != start) {
1853 node = tree_search(tree, start);
1854 BUG_ON(!node);
1855
1856 state = rb_entry(node, struct extent_state, rb_node);
1857 BUG_ON(state->start != start);
1858 }
1859 state->private = *csums++;
1860 count--;
1861 bio_index++;
1862
1863 state = next_state(state);
1864 } while (count);
1865 spin_unlock(&tree->lock);
1866}
1867
d1310b2e
CM
1868int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1869{
1870 struct rb_node *node;
1871 struct extent_state *state;
1872 int ret = 0;
1873
cad321ad 1874 spin_lock(&tree->lock);
d1310b2e
CM
1875 /*
1876 * this search will find all the extents that end after
1877 * our range starts.
1878 */
80ea96b1 1879 node = tree_search(tree, start);
2b114d1d 1880 if (!node) {
d1310b2e
CM
1881 ret = -ENOENT;
1882 goto out;
1883 }
1884 state = rb_entry(node, struct extent_state, rb_node);
1885 if (state->start != start) {
1886 ret = -ENOENT;
1887 goto out;
1888 }
1889 *private = state->private;
1890out:
cad321ad 1891 spin_unlock(&tree->lock);
d1310b2e
CM
1892 return ret;
1893}
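/*
 * usage sketch (editorial): the private field is a bare u64, so callers
 * such as bio_readpage_error() below round-trip a pointer through it.
 * 'example_get_failrec' is hypothetical; io_failure_record is defined
 * further down in this file.
 */
struct io_failure_record;

static struct io_failure_record *example_get_failrec(struct extent_io_tree *tree,
						     u64 start)
{
	u64 private;

	if (get_state_private(tree, start, &private))
		return NULL;	/* -ENOENT: no state starts at 'start' */
	return (struct io_failure_record *)(unsigned long)private;
}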
1894
1895/*
1896 * searches a range in the state tree for a given mask.
70dec807 1897 * If 'filled' == 1, this returns 1 only if the whole range is covered
d1310b2e
CM
1898 * by extents that have the bits set. Otherwise, 1 is returned if
1899 * any bit in the range is found set.
1900 */
1901int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
41074888 1902 unsigned long bits, int filled, struct extent_state *cached)
d1310b2e
CM
1903{
1904 struct extent_state *state = NULL;
1905 struct rb_node *node;
1906 int bitset = 0;
d1310b2e 1907
cad321ad 1908 spin_lock(&tree->lock);
df98b6e2
JB
1909 if (cached && cached->tree && cached->start <= start &&
1910 cached->end > start)
9655d298
CM
1911 node = &cached->rb_node;
1912 else
1913 node = tree_search(tree, start);
d1310b2e
CM
1914 while (node && start <= end) {
1915 state = rb_entry(node, struct extent_state, rb_node);
1916
1917 if (filled && state->start > start) {
1918 bitset = 0;
1919 break;
1920 }
1921
1922 if (state->start > end)
1923 break;
1924
1925 if (state->state & bits) {
1926 bitset = 1;
1927 if (!filled)
1928 break;
1929 } else if (filled) {
1930 bitset = 0;
1931 break;
1932 }
46562cec
CM
1933
1934 if (state->end == (u64)-1)
1935 break;
1936
d1310b2e
CM
1937 start = state->end + 1;
1938 if (start > end)
1939 break;
1940 node = rb_next(node);
1941 if (!node) {
1942 if (filled)
1943 bitset = 0;
1944 break;
1945 }
1946 }
cad321ad 1947 spin_unlock(&tree->lock);
d1310b2e
CM
1948 return bitset;
1949}
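/*
 * usage sketch (editorial) of the two 'filled' modes; the hypothetical
 * helpers mirror check_page_uptodate() and check_page_locked() below.
 */
static int example_whole_range_uptodate(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	/* filled == 1: every byte of [start, end] must carry the bit */
	return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
}

static int example_any_byte_locked(struct extent_io_tree *tree,
				   u64 start, u64 end)
{
	/* filled == 0: a single hit anywhere in the range is enough */
	return test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
}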
d1310b2e
CM
1950
1951/*
1952 * helper function to set a given page up to date if all the
1953 * extents in the tree for that page are up to date
1954 */
143bede5 1955static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
d1310b2e 1956{
4eee4fa4 1957 u64 start = page_offset(page);
d1310b2e 1958 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1959 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
d1310b2e 1960 SetPageUptodate(page);
d1310b2e
CM
1961}
1962
1963/*
1964 * helper function to unlock a page if all the extents in the tree
1965 * for that page are unlocked
1966 */
143bede5 1967static void check_page_locked(struct extent_io_tree *tree, struct page *page)
d1310b2e 1968{
4eee4fa4 1969 u64 start = page_offset(page);
d1310b2e 1970 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1971 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
d1310b2e 1972 unlock_page(page);
d1310b2e
CM
1973}
1974
1975/*
1976 * helper function to end page writeback; the extent tree is no longer
1977 * consulted here, writeback is simply ended on the whole page
1978 */
143bede5
JM
1979static void check_page_writeback(struct extent_io_tree *tree,
1980 struct page *page)
d1310b2e 1981{
1edbb734 1982 end_page_writeback(page);
d1310b2e
CM
1983}
1984
4a54c8c1
JS
1985/*
1986 * When IO fails, either with EIO or csum verification fails, we
1987 * try other mirrors that might have a good copy of the data. This
1988 * io_failure_record is used to record state as we go through all the
1989 * mirrors. If another mirror has good data, the page is set up to date
1990 * and things continue. If a good mirror can't be found, the original
1991 * bio end_io callback is called to indicate things have failed.
1992 */
1993struct io_failure_record {
1994 struct page *page;
1995 u64 start;
1996 u64 len;
1997 u64 logical;
1998 unsigned long bio_flags;
1999 int this_mirror;
2000 int failed_mirror;
2001 int in_validation;
2002};
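/*
 * usage sketch (editorial): recording a failure.  The record's address
 * is keyed by file offset in the failure tree's private field; this
 * mirrors the set_extent_bits()/set_state_private() pair used by
 * bio_readpage_error() below.  'example_record_failure' is hypothetical.
 */
static int example_record_failure(struct extent_io_tree *failure_tree,
				  struct io_failure_record *failrec)
{
	int ret;

	ret = set_extent_bits(failure_tree, failrec->start,
			      failrec->start + failrec->len - 1,
			      EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
	if (ret < 0)
		return ret;
	return set_state_private(failure_tree, failrec->start,
				 (u64)(unsigned long)failrec);
}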
2003
2004static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
2005 int did_repair)
2006{
2007 int ret;
2008 int err = 0;
2009 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2010
2011 set_state_private(failure_tree, rec->start, 0);
2012 ret = clear_extent_bits(failure_tree, rec->start,
2013 rec->start + rec->len - 1,
2014 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2015 if (ret)
2016 err = ret;
2017
53b381b3
DW
2018 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
2019 rec->start + rec->len - 1,
2020 EXTENT_DAMAGED, GFP_NOFS);
2021 if (ret && !err)
2022 err = ret;
4a54c8c1
JS
2023
2024 kfree(rec);
2025 return err;
2026}
2027
2028static void repair_io_failure_callback(struct bio *bio, int err)
2029{
2030 complete(bio->bi_private);
2031}
2032
2033/*
2034 * this bypasses the standard btrfs submit functions deliberately, as
2035 * the standard behavior is to write all copies in a raid setup. here we only
2036 * want to write the one bad copy. so we do the mapping for ourselves and issue
2037 * submit_bio directly.
3ec706c8 2038 * to avoid any synchronization issues, wait for the data after writing, which
4a54c8c1
JS
2039 * actually prevents the read that triggered the error from finishing.
2040 * currently, there can be no more than two copies of every data bit. thus,
2041 * exactly one rewrite is required.
2042 */
3ec706c8 2043int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
4a54c8c1
JS
2044 u64 length, u64 logical, struct page *page,
2045 int mirror_num)
2046{
2047 struct bio *bio;
2048 struct btrfs_device *dev;
2049 DECLARE_COMPLETION_ONSTACK(compl);
2050 u64 map_length = 0;
2051 u64 sector;
2052 struct btrfs_bio *bbio = NULL;
53b381b3 2053 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4a54c8c1
JS
2054 int ret;
2055
2056 BUG_ON(!mirror_num);
2057
53b381b3
DW
2058 /* we can't repair anything in raid56 yet */
2059 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2060 return 0;
2061
9be3395b 2062 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4a54c8c1
JS
2063 if (!bio)
2064 return -EIO;
2065 bio->bi_private = &compl;
2066 bio->bi_end_io = repair_io_failure_callback;
2067 bio->bi_size = 0;
2068 map_length = length;
2069
3ec706c8 2070 ret = btrfs_map_block(fs_info, WRITE, logical,
4a54c8c1
JS
2071 &map_length, &bbio, mirror_num);
2072 if (ret) {
2073 bio_put(bio);
2074 return -EIO;
2075 }
2076 BUG_ON(mirror_num != bbio->mirror_num);
2077 sector = bbio->stripes[mirror_num-1].physical >> 9;
2078 bio->bi_sector = sector;
2079 dev = bbio->stripes[mirror_num-1].dev;
2080 kfree(bbio);
2081 if (!dev || !dev->bdev || !dev->writeable) {
2082 bio_put(bio);
2083 return -EIO;
2084 }
2085 bio->bi_bdev = dev->bdev;
4eee4fa4 2086 bio_add_page(bio, page, length, start - page_offset(page));
21adbd5c 2087 btrfsic_submit_bio(WRITE_SYNC, bio);
4a54c8c1
JS
2088 wait_for_completion(&compl);
2089
2090 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2091 /* try to remap that extent elsewhere? */
2092 bio_put(bio);
442a4f63 2093 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4a54c8c1
JS
2094 return -EIO;
2095 }
2096
d5b025d5 2097 printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
606686ee
JB
2098 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2099 start, rcu_str_deref(dev->name), sector);
4a54c8c1
JS
2100
2101 bio_put(bio);
2102 return 0;
2103}
2104
ea466794
JB
2105int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2106 int mirror_num)
2107{
ea466794
JB
2108 u64 start = eb->start;
2109 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
d95603b2 2110 int ret = 0;
ea466794
JB
2111
2112 for (i = 0; i < num_pages; i++) {
2113 struct page *p = extent_buffer_page(eb, i);
3ec706c8 2114 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
ea466794
JB
2115 start, p, mirror_num);
2116 if (ret)
2117 break;
2118 start += PAGE_CACHE_SIZE;
2119 }
2120
2121 return ret;
2122}
2123
4a54c8c1
JS
2124/*
2125 * each time an IO finishes, we do a fast check in the IO failure tree
2126 * to see if we need to process or clean up an io_failure_record
2127 */
2128static int clean_io_failure(u64 start, struct page *page)
2129{
2130 u64 private;
2131 u64 private_failure;
2132 struct io_failure_record *failrec;
3ec706c8 2133 struct btrfs_fs_info *fs_info;
4a54c8c1
JS
2134 struct extent_state *state;
2135 int num_copies;
2136 int did_repair = 0;
2137 int ret;
2138 struct inode *inode = page->mapping->host;
2139
2140 private = 0;
2141 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2142 (u64)-1, 1, EXTENT_DIRTY, 0);
2143 if (!ret)
2144 return 0;
2145
2146 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2147 &private_failure);
2148 if (ret)
2149 return 0;
2150
2151 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2152 BUG_ON(!failrec->this_mirror);
2153
2154 if (failrec->in_validation) {
2155 /* there was no real error, just free the record */
2156 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2157 failrec->start);
2158 did_repair = 1;
2159 goto out;
2160 }
2161
2162 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2163 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2164 failrec->start,
2165 EXTENT_LOCKED);
2166 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2167
2168 if (state && state->start == failrec->start) {
3ec706c8
SB
2169 fs_info = BTRFS_I(inode)->root->fs_info;
2170 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2171 failrec->len);
4a54c8c1 2172 if (num_copies > 1) {
3ec706c8 2173 ret = repair_io_failure(fs_info, start, failrec->len,
4a54c8c1
JS
2174 failrec->logical, page,
2175 failrec->failed_mirror);
2176 did_repair = !ret;
2177 }
53b381b3 2178 ret = 0;
4a54c8c1
JS
2179 }
2180
2181out:
2182 if (!ret)
2183 ret = free_io_failure(inode, failrec, did_repair);
2184
2185 return ret;
2186}
2187
2188/*
2189 * this is a generic handler for readpage errors (default
2190 * readpage_io_failed_hook). if other copies exist, read those and write back
2191 * good data to the failed position. it makes no attempt to remap the
2192 * failed extent elsewhere, hoping the device will be smart enough to do
2193 * this as needed
2194 */
2195
2196static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2197 u64 start, u64 end, int failed_mirror,
2198 struct extent_state *state)
2199{
2200 struct io_failure_record *failrec = NULL;
2201 u64 private;
2202 struct extent_map *em;
2203 struct inode *inode = page->mapping->host;
2204 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2205 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2206 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2207 struct bio *bio;
2208 int num_copies;
2209 int ret;
2210 int read_mode;
2211 u64 logical;
2212
2213 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2214
2215 ret = get_state_private(failure_tree, start, &private);
2216 if (ret) {
2217 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2218 if (!failrec)
2219 return -ENOMEM;
2220 failrec->start = start;
2221 failrec->len = end - start + 1;
2222 failrec->this_mirror = 0;
2223 failrec->bio_flags = 0;
2224 failrec->in_validation = 0;
2225
2226 read_lock(&em_tree->lock);
2227 em = lookup_extent_mapping(em_tree, start, failrec->len);
2228 if (!em) {
2229 read_unlock(&em_tree->lock);
2230 kfree(failrec);
2231 return -EIO;
2232 }
2233
2234 if (em->start > start || em->start + em->len < start) {
2235 free_extent_map(em);
2236 em = NULL;
2237 }
2238 read_unlock(&em_tree->lock);
2239
7a2d6a64 2240 if (!em) {
4a54c8c1
JS
2241 kfree(failrec);
2242 return -EIO;
2243 }
2244 logical = start - em->start;
2245 logical = em->block_start + logical;
2246 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2247 logical = em->block_start;
2248 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2249 extent_set_compress_type(&failrec->bio_flags,
2250 em->compress_type);
2251 }
2252 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2253 "len=%llu\n", logical, start, failrec->len);
2254 failrec->logical = logical;
2255 free_extent_map(em);
2256
2257 /* set the bits in the private failure tree */
2258 ret = set_extent_bits(failure_tree, start, end,
2259 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2260 if (ret >= 0)
2261 ret = set_state_private(failure_tree, start,
2262 (u64)(unsigned long)failrec);
2263 /* set the bits in the inode's tree */
2264 if (ret >= 0)
2265 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2266 GFP_NOFS);
2267 if (ret < 0) {
2268 kfree(failrec);
2269 return ret;
2270 }
2271 } else {
2272 failrec = (struct io_failure_record *)(unsigned long)private;
2273 pr_debug("bio_readpage_error: (found) logical=%llu, "
2274 "start=%llu, len=%llu, validation=%d\n",
2275 failrec->logical, failrec->start, failrec->len,
2276 failrec->in_validation);
2277 /*
2278 * when data can be on disk more than twice, add to failrec here
2279 * (e.g. with a list for failed_mirror) to make
2280 * clean_io_failure() clean all those errors at once.
2281 */
2282 }
5d964051
SB
2283 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2284 failrec->logical, failrec->len);
4a54c8c1
JS
2285 if (num_copies == 1) {
2286 /*
2287 * we only have a single copy of the data, so don't bother with
2288 * all the retry and error correction code that follows. no
2289 * matter what the error is, it is very likely to persist.
2290 */
2291 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2292 "state=%p, num_copies=%d, next_mirror %d, "
2293 "failed_mirror %d\n", state, num_copies,
2294 failrec->this_mirror, failed_mirror);
2295 free_io_failure(inode, failrec, 0);
2296 return -EIO;
2297 }
2298
2299 if (!state) {
2300 spin_lock(&tree->lock);
2301 state = find_first_extent_bit_state(tree, failrec->start,
2302 EXTENT_LOCKED);
2303 if (state && state->start != failrec->start)
2304 state = NULL;
2305 spin_unlock(&tree->lock);
2306 }
2307
2308 /*
2309 * there are two goals:
2310 * a) deliver good data to the caller
2311 * b) correct the bad sectors on disk
2312 */
2313 if (failed_bio->bi_vcnt > 1) {
2314 /*
2315 * to fulfill b), we need to know the exact failing sectors, as
2316 * we don't want to rewrite any more than the failed ones. thus,
2317 * we need separate read requests for the failed bio
2318 *
2319 * if the following BUG_ON triggers, our validation request got
2320 * merged. we need separate requests for our algorithm to work.
2321 */
2322 BUG_ON(failrec->in_validation);
2323 failrec->in_validation = 1;
2324 failrec->this_mirror = failed_mirror;
2325 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2326 } else {
2327 /*
2328 * we're ready to fulfill a) and b) at the same time. get a good copy
2329 * of the failed sector and if we succeed, we have setup
2330 * everything for repair_io_failure to do the rest for us.
2331 */
2332 if (failrec->in_validation) {
2333 BUG_ON(failrec->this_mirror != failed_mirror);
2334 failrec->in_validation = 0;
2335 failrec->this_mirror = 0;
2336 }
2337 failrec->failed_mirror = failed_mirror;
2338 failrec->this_mirror++;
2339 if (failrec->this_mirror == failed_mirror)
2340 failrec->this_mirror++;
2341 read_mode = READ_SYNC;
2342 }
2343
2344 if (!state || failrec->this_mirror > num_copies) {
2345 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2346 "next_mirror %d, failed_mirror %d\n", state,
2347 num_copies, failrec->this_mirror, failed_mirror);
2348 free_io_failure(inode, failrec, 0);
2349 return -EIO;
2350 }
2351
9be3395b 2352 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
e627ee7b
TI
2353 if (!bio) {
2354 free_io_failure(inode, failrec, 0);
2355 return -EIO;
2356 }
4a54c8c1
JS
2357 bio->bi_private = state;
2358 bio->bi_end_io = failed_bio->bi_end_io;
2359 bio->bi_sector = failrec->logical >> 9;
2360 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2361 bio->bi_size = 0;
2362
2363 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2364
2365 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2366 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2367 failrec->this_mirror, num_copies, failrec->in_validation);
2368
013bd4c3
TI
2369 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2370 failrec->this_mirror,
2371 failrec->bio_flags, 0);
2372 return ret;
4a54c8c1
JS
2373}
2374
d1310b2e
CM
2375/* lots and lots of room for performance fixes in the end_bio funcs */
2376
87826df0
JM
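/*
 * editorial note: run the writepage_end_io_hook for [start, end] and mark
 * the page errored if the hook (or the IO itself) failed.  Always returns
 * 0 so the end_io loop below can keep walking the bio's pages.
 */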
2377int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2378{
2379 int uptodate = (err == 0);
2380 struct extent_io_tree *tree;
2381 int ret;
2382
2383 tree = &BTRFS_I(page->mapping->host)->io_tree;
2384
2385 if (tree->ops && tree->ops->writepage_end_io_hook) {
2386 ret = tree->ops->writepage_end_io_hook(page, start,
2387 end, NULL, uptodate);
2388 if (ret)
2389 uptodate = 0;
2390 }
2391
87826df0 2392 if (!uptodate) {
87826df0
JM
2393 ClearPageUptodate(page);
2394 SetPageError(page);
2395 }
2396 return 0;
2397}
2398
d1310b2e
CM
2399/*
2400 * after a writepage IO is done, we need to:
2401 * clear the uptodate bits on error
2402 * clear the writeback bits in the extent tree for this IO
2403 * end_page_writeback if the page has no more pending IO
2404 *
2405 * Scheduling is not allowed, so the extent state tree is expected
2406 * to have one and only one object corresponding to this IO.
2407 */
d1310b2e 2408static void end_bio_extent_writepage(struct bio *bio, int err)
d1310b2e 2409{
d1310b2e 2410 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 2411 struct extent_io_tree *tree;
d1310b2e
CM
2412 u64 start;
2413 u64 end;
2414 int whole_page;
2415
d1310b2e
CM
2416 do {
2417 struct page *page = bvec->bv_page;
902b22f3
DW
2418 tree = &BTRFS_I(page->mapping->host)->io_tree;
2419
4eee4fa4 2420 start = page_offset(page) + bvec->bv_offset;
d1310b2e
CM
2421 end = start + bvec->bv_len - 1;
2422
2423 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2424 whole_page = 1;
2425 else
2426 whole_page = 0;
2427
2428 if (--bvec >= bio->bi_io_vec)
2429 prefetchw(&bvec->bv_page->flags);
1259ab75 2430
87826df0
JM
2431 if (end_extent_writepage(page, err, start, end))
2432 continue;
70dec807 2433
d1310b2e
CM
2434 if (whole_page)
2435 end_page_writeback(page);
2436 else
2437 check_page_writeback(tree, page);
d1310b2e 2438 } while (bvec >= bio->bi_io_vec);
2b1f55b0 2439
d1310b2e 2440 bio_put(bio);
d1310b2e
CM
2441}
2442
2443/*
2444 * after a readpage IO is done, we need to:
2445 * clear the uptodate bits on error
2446 * set the uptodate bits if things worked
2447 * set the page up to date if all extents in the tree are uptodate
2448 * clear the lock bit in the extent tree
2449 * unlock the page if there are no other extents locked for it
2450 *
2451 * Scheduling is not allowed, so the extent state tree is expected
2452 * to have one and only one object corresponding to this IO.
2453 */
d1310b2e 2454static void end_bio_extent_readpage(struct bio *bio, int err)
d1310b2e
CM
2455{
2456 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4125bf76
CM
2457 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2458 struct bio_vec *bvec = bio->bi_io_vec;
902b22f3 2459 struct extent_io_tree *tree;
d1310b2e
CM
2460 u64 start;
2461 u64 end;
2462 int whole_page;
5cf1ab56 2463 int mirror;
d1310b2e
CM
2464 int ret;
2465
d20f7043
CM
2466 if (err)
2467 uptodate = 0;
2468
d1310b2e
CM
2469 do {
2470 struct page *page = bvec->bv_page;
507903b8
AJ
2471 struct extent_state *cached = NULL;
2472 struct extent_state *state;
9be3395b 2473 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
507903b8 2474
be3940c0 2475 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
9be3395b
CM
2476 "mirror=%lu\n", (u64)bio->bi_sector, err,
2477 io_bio->mirror_num);
902b22f3
DW
2478 tree = &BTRFS_I(page->mapping->host)->io_tree;
2479
4eee4fa4 2480 start = page_offset(page) + bvec->bv_offset;
d1310b2e
CM
2481 end = start + bvec->bv_len - 1;
2482
2483 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2484 whole_page = 1;
2485 else
2486 whole_page = 0;
2487
4125bf76 2488 if (++bvec <= bvec_end)
d1310b2e
CM
2489 prefetchw(&bvec->bv_page->flags);
2490
507903b8 2491 spin_lock(&tree->lock);
0d399205 2492 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
109b36a2 2493 if (state && state->start == start) {
507903b8
AJ
2494 /*
2495 * take a reference on the state, unlock will drop
2496 * the ref
2497 */
2498 cache_state(state, &cached);
2499 }
2500 spin_unlock(&tree->lock);
2501
9be3395b 2502 mirror = io_bio->mirror_num;
d1310b2e 2503 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
70dec807 2504 ret = tree->ops->readpage_end_io_hook(page, start, end,
5cf1ab56 2505 state, mirror);
5ee0844d 2506 if (ret)
d1310b2e 2507 uptodate = 0;
5ee0844d 2508 else
4a54c8c1 2509 clean_io_failure(start, page);
d1310b2e 2510 }
ea466794 2511
ea466794 2512 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
5cf1ab56 2513 ret = tree->ops->readpage_io_failed_hook(page, mirror);
ea466794
JB
2514 if (!ret && !err &&
2515 test_bit(BIO_UPTODATE, &bio->bi_flags))
2516 uptodate = 1;
2517 } else if (!uptodate) {
f4a8e656
JS
2518 /*
2519 * The generic bio_readpage_error handles errors the
2520 * following way: If possible, new read requests are
2521 * created and submitted and will end up in
2522 * end_bio_extent_readpage as well (if we're lucky, not
2523 * in the !uptodate case). In that case it returns 0 and
2524 * we just go on with the next page in our bio. If it
2525 * can't handle the error it will return -EIO and we
2526 * remain responsible for that page.
2527 */
5cf1ab56 2528 ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
7e38326f 2529 if (ret == 0) {
3b951516
CM
2530 uptodate =
2531 test_bit(BIO_UPTODATE, &bio->bi_flags);
d20f7043
CM
2532 if (err)
2533 uptodate = 0;
507903b8 2534 uncache_state(&cached);
7e38326f
CM
2535 continue;
2536 }
2537 }
d1310b2e 2538
0b32f4bb 2539 if (uptodate && tree->track_uptodate) {
507903b8 2540 set_extent_uptodate(tree, start, end, &cached,
902b22f3 2541 GFP_ATOMIC);
771ed689 2542 }
507903b8 2543 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
d1310b2e 2544
70dec807
CM
2545 if (whole_page) {
2546 if (uptodate) {
2547 SetPageUptodate(page);
2548 } else {
2549 ClearPageUptodate(page);
2550 SetPageError(page);
2551 }
d1310b2e 2552 unlock_page(page);
70dec807
CM
2553 } else {
2554 if (uptodate) {
2555 check_page_uptodate(tree, page);
2556 } else {
2557 ClearPageUptodate(page);
2558 SetPageError(page);
2559 }
d1310b2e 2560 check_page_locked(tree, page);
70dec807 2561 }
4125bf76 2562 } while (bvec <= bvec_end);
d1310b2e
CM
2563
2564 bio_put(bio);
d1310b2e
CM
2565}
2566
9be3395b
CM
2567/*
2568 * this allocates from the btrfs_bioset. We're returning a bio right now
2569 * but you can call btrfs_io_bio for the appropriate container_of magic
2570 */
88f794ed
MX
2571struct bio *
2572btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2573 gfp_t gfp_flags)
d1310b2e
CM
2574{
2575 struct bio *bio;
2576
9be3395b 2577 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
d1310b2e
CM
2578
2579 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
9be3395b
CM
2580 while (!bio && (nr_vecs /= 2)) {
2581 bio = bio_alloc_bioset(gfp_flags,
2582 nr_vecs, btrfs_bioset);
2583 }
d1310b2e
CM
2584 }
2585
2586 if (bio) {
e1c4b745 2587 bio->bi_size = 0;
d1310b2e
CM
2588 bio->bi_bdev = bdev;
2589 bio->bi_sector = first_sector;
2590 }
2591 return bio;
2592}
2593
9be3395b
CM
2594struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2595{
2596 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2597}
2598
2599
2600/* this also allocates from the btrfs_bioset */
2601struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2602{
2603 return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2604}
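/*
 * editorial sketch of the container_of accessor mentioned above; the
 * real btrfs_io_bio() helper is assumed to live in a header next to the
 * struct definition, with 'bio' as the struct's final member:
 */
static inline struct btrfs_io_bio *example_io_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_io_bio, bio);
}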
2605
2606
355808c2
JM
2607static int __must_check submit_one_bio(int rw, struct bio *bio,
2608 int mirror_num, unsigned long bio_flags)
d1310b2e 2609{
d1310b2e 2610 int ret = 0;
70dec807
CM
2611 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2612 struct page *page = bvec->bv_page;
2613 struct extent_io_tree *tree = bio->bi_private;
70dec807 2614 u64 start;
70dec807 2615
4eee4fa4 2616 start = page_offset(page) + bvec->bv_offset;
70dec807 2617
902b22f3 2618 bio->bi_private = NULL;
d1310b2e
CM
2619
2620 bio_get(bio);
2621
065631f6 2622 if (tree->ops && tree->ops->submit_bio_hook)
6b82ce8d 2623 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
eaf25d93 2624 mirror_num, bio_flags, start);
0b86a832 2625 else
21adbd5c 2626 btrfsic_submit_bio(rw, bio);
4a54c8c1 2627
d1310b2e
CM
2628 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2629 ret = -EOPNOTSUPP;
2630 bio_put(bio);
2631 return ret;
2632}
2633
64a16701 2634static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
3444a972
JM
2635 unsigned long offset, size_t size, struct bio *bio,
2636 unsigned long bio_flags)
2637{
2638 int ret = 0;
2639 if (tree->ops && tree->ops->merge_bio_hook)
64a16701 2640 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
3444a972
JM
2641 bio_flags);
2642 BUG_ON(ret < 0);
2643 return ret;
2644
2645}
2646
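/*
 * editorial note: submit_extent_page() appends [offset, offset + size)
 * of 'page' to the bio being built in *bio_ret.  When the chunk is not
 * contiguous with that bio, the flags differ, or the merge hook or
 * bio_add_page() refuses it, the old bio is submitted first and a fresh
 * one is started.  Returns 0 or a negative errno from submission.
 */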
d1310b2e
CM
2647static int submit_extent_page(int rw, struct extent_io_tree *tree,
2648 struct page *page, sector_t sector,
2649 size_t size, unsigned long offset,
2650 struct block_device *bdev,
2651 struct bio **bio_ret,
2652 unsigned long max_pages,
f188591e 2653 bio_end_io_t end_io_func,
c8b97818
CM
2654 int mirror_num,
2655 unsigned long prev_bio_flags,
2656 unsigned long bio_flags)
d1310b2e
CM
2657{
2658 int ret = 0;
2659 struct bio *bio;
2660 int nr;
c8b97818
CM
2661 int contig = 0;
2662 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2663 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
5b050f04 2664 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
d1310b2e
CM
2665
2666 if (bio_ret && *bio_ret) {
2667 bio = *bio_ret;
c8b97818
CM
2668 if (old_compressed)
2669 contig = bio->bi_sector == sector;
2670 else
2671 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2672 sector;
2673
2674 if (prev_bio_flags != bio_flags || !contig ||
64a16701 2675 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
c8b97818
CM
2676 bio_add_page(bio, page, page_size, offset) < page_size) {
2677 ret = submit_one_bio(rw, bio, mirror_num,
2678 prev_bio_flags);
79787eaa
JM
2679 if (ret < 0)
2680 return ret;
d1310b2e
CM
2681 bio = NULL;
2682 } else {
2683 return 0;
2684 }
2685 }
c8b97818
CM
2686 if (this_compressed)
2687 nr = BIO_MAX_PAGES;
2688 else
2689 nr = bio_get_nr_vecs(bdev);
2690
88f794ed 2691 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
5df67083
TI
2692 if (!bio)
2693 return -ENOMEM;
70dec807 2694
c8b97818 2695 bio_add_page(bio, page, page_size, offset);
d1310b2e
CM
2696 bio->bi_end_io = end_io_func;
2697 bio->bi_private = tree;
70dec807 2698
d397712b 2699 if (bio_ret)
d1310b2e 2700 *bio_ret = bio;
d397712b 2701 else
c8b97818 2702 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
d1310b2e
CM
2703
2704 return ret;
2705}
2706
48a3b636
ES
2707static void attach_extent_buffer_page(struct extent_buffer *eb,
2708 struct page *page)
d1310b2e
CM
2709{
2710 if (!PagePrivate(page)) {
2711 SetPagePrivate(page);
d1310b2e 2712 page_cache_get(page);
4f2de97a
JB
2713 set_page_private(page, (unsigned long)eb);
2714 } else {
2715 WARN_ON(page->private != (unsigned long)eb);
d1310b2e
CM
2716 }
2717}
2718
4f2de97a 2719void set_page_extent_mapped(struct page *page)
d1310b2e 2720{
4f2de97a
JB
2721 if (!PagePrivate(page)) {
2722 SetPagePrivate(page);
2723 page_cache_get(page);
2724 set_page_private(page, EXTENT_PAGE_PRIVATE);
2725 }
d1310b2e
CM
2726}
2727
2728/*
2729 * basic readpage implementation. Locked extent state structs are inserted
2730 * into the tree and removed again when the IO is done (by the end_io
2731 * handlers)
79787eaa 2732 * XXX JDM: This needs looking at to ensure proper page locking
d1310b2e
CM
2733 */
2734static int __extent_read_full_page(struct extent_io_tree *tree,
2735 struct page *page,
2736 get_extent_t *get_extent,
c8b97818 2737 struct bio **bio, int mirror_num,
d4c7ca86 2738 unsigned long *bio_flags, int rw)
d1310b2e
CM
2739{
2740 struct inode *inode = page->mapping->host;
4eee4fa4 2741 u64 start = page_offset(page);
d1310b2e
CM
2742 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2743 u64 end;
2744 u64 cur = start;
2745 u64 extent_offset;
2746 u64 last_byte = i_size_read(inode);
2747 u64 block_start;
2748 u64 cur_end;
2749 sector_t sector;
2750 struct extent_map *em;
2751 struct block_device *bdev;
11c65dcc 2752 struct btrfs_ordered_extent *ordered;
d1310b2e
CM
2753 int ret;
2754 int nr = 0;
306e16ce 2755 size_t pg_offset = 0;
d1310b2e 2756 size_t iosize;
c8b97818 2757 size_t disk_io_size;
d1310b2e 2758 size_t blocksize = inode->i_sb->s_blocksize;
c8b97818 2759 unsigned long this_bio_flag = 0;
d1310b2e
CM
2760
2761 set_page_extent_mapped(page);
2762
90a887c9
DM
2763 if (!PageUptodate(page)) {
2764 if (cleancache_get_page(page) == 0) {
2765 BUG_ON(blocksize != PAGE_SIZE);
2766 goto out;
2767 }
2768 }
2769
d1310b2e 2770 end = page_end;
11c65dcc 2771 while (1) {
d0082371 2772 lock_extent(tree, start, end);
11c65dcc
JB
2773 ordered = btrfs_lookup_ordered_extent(inode, start);
2774 if (!ordered)
2775 break;
d0082371 2776 unlock_extent(tree, start, end);
11c65dcc
JB
2777 btrfs_start_ordered_extent(inode, ordered, 1);
2778 btrfs_put_ordered_extent(ordered);
2779 }
d1310b2e 2780
c8b97818
CM
2781 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2782 char *userpage;
2783 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2784
2785 if (zero_offset) {
2786 iosize = PAGE_CACHE_SIZE - zero_offset;
7ac687d9 2787 userpage = kmap_atomic(page);
c8b97818
CM
2788 memset(userpage + zero_offset, 0, iosize);
2789 flush_dcache_page(page);
7ac687d9 2790 kunmap_atomic(userpage);
c8b97818
CM
2791 }
2792 }
d1310b2e 2793 while (cur <= end) {
c8f2f24b
JB
2794 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2795
d1310b2e
CM
2796 if (cur >= last_byte) {
2797 char *userpage;
507903b8
AJ
2798 struct extent_state *cached = NULL;
2799
306e16ce 2800 iosize = PAGE_CACHE_SIZE - pg_offset;
7ac687d9 2801 userpage = kmap_atomic(page);
306e16ce 2802 memset(userpage + pg_offset, 0, iosize);
d1310b2e 2803 flush_dcache_page(page);
7ac687d9 2804 kunmap_atomic(userpage);
d1310b2e 2805 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2806 &cached, GFP_NOFS);
2807 unlock_extent_cached(tree, cur, cur + iosize - 1,
2808 &cached, GFP_NOFS);
d1310b2e
CM
2809 break;
2810 }
306e16ce 2811 em = get_extent(inode, page, pg_offset, cur,
d1310b2e 2812 end - cur + 1, 0);
c704005d 2813 if (IS_ERR_OR_NULL(em)) {
d1310b2e 2814 SetPageError(page);
d0082371 2815 unlock_extent(tree, cur, end);
d1310b2e
CM
2816 break;
2817 }
d1310b2e
CM
2818 extent_offset = cur - em->start;
2819 BUG_ON(extent_map_end(em) <= cur);
2820 BUG_ON(end < cur);
2821
261507a0 2822 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
c8b97818 2823 this_bio_flag = EXTENT_BIO_COMPRESSED;
261507a0
LZ
2824 extent_set_compress_type(&this_bio_flag,
2825 em->compress_type);
2826 }
c8b97818 2827
d1310b2e
CM
2828 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2829 cur_end = min(extent_map_end(em) - 1, end);
fda2832f 2830 iosize = ALIGN(iosize, blocksize);
c8b97818
CM
2831 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2832 disk_io_size = em->block_len;
2833 sector = em->block_start >> 9;
2834 } else {
2835 sector = (em->block_start + extent_offset) >> 9;
2836 disk_io_size = iosize;
2837 }
d1310b2e
CM
2838 bdev = em->bdev;
2839 block_start = em->block_start;
d899e052
YZ
2840 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2841 block_start = EXTENT_MAP_HOLE;
d1310b2e
CM
2842 free_extent_map(em);
2843 em = NULL;
2844
2845 /* we've found a hole, just zero and go on */
2846 if (block_start == EXTENT_MAP_HOLE) {
2847 char *userpage;
507903b8
AJ
2848 struct extent_state *cached = NULL;
2849
7ac687d9 2850 userpage = kmap_atomic(page);
306e16ce 2851 memset(userpage + pg_offset, 0, iosize);
d1310b2e 2852 flush_dcache_page(page);
7ac687d9 2853 kunmap_atomic(userpage);
d1310b2e
CM
2854
2855 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2856 &cached, GFP_NOFS);
2857 unlock_extent_cached(tree, cur, cur + iosize - 1,
2858 &cached, GFP_NOFS);
d1310b2e 2859 cur = cur + iosize;
306e16ce 2860 pg_offset += iosize;
d1310b2e
CM
2861 continue;
2862 }
2863 /* the get_extent function already copied into the page */
9655d298
CM
2864 if (test_range_bit(tree, cur, cur_end,
2865 EXTENT_UPTODATE, 1, NULL)) {
a1b32a59 2866 check_page_uptodate(tree, page);
d0082371 2867 unlock_extent(tree, cur, cur + iosize - 1);
d1310b2e 2868 cur = cur + iosize;
306e16ce 2869 pg_offset += iosize;
d1310b2e
CM
2870 continue;
2871 }
70dec807
CM
2872 /* we have an inline extent but it didn't get marked up
2873 * to date. Error out
2874 */
2875 if (block_start == EXTENT_MAP_INLINE) {
2876 SetPageError(page);
d0082371 2877 unlock_extent(tree, cur, cur + iosize - 1);
70dec807 2878 cur = cur + iosize;
306e16ce 2879 pg_offset += iosize;
70dec807
CM
2880 continue;
2881 }
d1310b2e 2882
c8f2f24b 2883 pnr -= page->index;
d4c7ca86 2884 ret = submit_extent_page(rw, tree, page,
306e16ce 2885 sector, disk_io_size, pg_offset,
89642229 2886 bdev, bio, pnr,
c8b97818
CM
2887 end_bio_extent_readpage, mirror_num,
2888 *bio_flags,
2889 this_bio_flag);
c8f2f24b
JB
2890 if (!ret) {
2891 nr++;
2892 *bio_flags = this_bio_flag;
2893 } else {
d1310b2e 2894 SetPageError(page);
edd33c99
JB
2895 unlock_extent(tree, cur, cur + iosize - 1);
2896 }
d1310b2e 2897 cur = cur + iosize;
306e16ce 2898 pg_offset += iosize;
d1310b2e 2899 }
90a887c9 2900out:
d1310b2e
CM
2901 if (!nr) {
2902 if (!PageError(page))
2903 SetPageUptodate(page);
2904 unlock_page(page);
2905 }
2906 return 0;
2907}
2908
2909int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
8ddc7d9c 2910 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
2911{
2912 struct bio *bio = NULL;
c8b97818 2913 unsigned long bio_flags = 0;
d1310b2e
CM
2914 int ret;
2915
8ddc7d9c 2916 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
d4c7ca86 2917 &bio_flags, READ);
d1310b2e 2918 if (bio)
8ddc7d9c 2919 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
d1310b2e
CM
2920 return ret;
2921}
d1310b2e 2922
11c8349b
CM
2923static noinline void update_nr_written(struct page *page,
2924 struct writeback_control *wbc,
2925 unsigned long nr_written)
2926{
2927 wbc->nr_to_write -= nr_written;
2928 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2929 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2930 page->mapping->writeback_index = page->index + nr_written;
2931}
2932
d1310b2e
CM
2933/*
2934 * the writepage semantics are similar to regular writepage. extent
2935 * records are inserted to lock ranges in the tree, and as dirty areas
2936 * are found, they are marked writeback. Then the lock bits are removed
2937 * and the end_io handler clears the writeback ranges
2938 */
2939static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2940 void *data)
2941{
2942 struct inode *inode = page->mapping->host;
2943 struct extent_page_data *epd = data;
2944 struct extent_io_tree *tree = epd->tree;
4eee4fa4 2945 u64 start = page_offset(page);
d1310b2e
CM
2946 u64 delalloc_start;
2947 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2948 u64 end;
2949 u64 cur = start;
2950 u64 extent_offset;
2951 u64 last_byte = i_size_read(inode);
2952 u64 block_start;
2953 u64 iosize;
2954 sector_t sector;
2c64c53d 2955 struct extent_state *cached_state = NULL;
d1310b2e
CM
2956 struct extent_map *em;
2957 struct block_device *bdev;
2958 int ret;
2959 int nr = 0;
7f3c74fb 2960 size_t pg_offset = 0;
d1310b2e
CM
2961 size_t blocksize;
2962 loff_t i_size = i_size_read(inode);
2963 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2964 u64 nr_delalloc;
2965 u64 delalloc_end;
c8b97818
CM
2966 int page_started;
2967 int compressed;
ffbd517d 2968 int write_flags;
771ed689 2969 unsigned long nr_written = 0;
9e487107 2970 bool fill_delalloc = true;
d1310b2e 2971
ffbd517d 2972 if (wbc->sync_mode == WB_SYNC_ALL)
721a9602 2973 write_flags = WRITE_SYNC;
ffbd517d
CM
2974 else
2975 write_flags = WRITE;
2976
1abe9b8a 2977 trace___extent_writepage(page, inode, wbc);
2978
d1310b2e 2979 WARN_ON(!PageLocked(page));
bf0da8c1
CM
2980
2981 ClearPageError(page);
2982
7f3c74fb 2983 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 2984 if (page->index > end_index ||
7f3c74fb 2985 (page->index == end_index && !pg_offset)) {
39be25cd 2986 page->mapping->a_ops->invalidatepage(page, 0);
d1310b2e
CM
2987 unlock_page(page);
2988 return 0;
2989 }
2990
2991 if (page->index == end_index) {
2992 char *userpage;
2993
7ac687d9 2994 userpage = kmap_atomic(page);
7f3c74fb
CM
2995 memset(userpage + pg_offset, 0,
2996 PAGE_CACHE_SIZE - pg_offset);
7ac687d9 2997 kunmap_atomic(userpage);
211c17f5 2998 flush_dcache_page(page);
d1310b2e 2999 }
7f3c74fb 3000 pg_offset = 0;
d1310b2e
CM
3001
3002 set_page_extent_mapped(page);
3003
9e487107
JB
3004 if (!tree->ops || !tree->ops->fill_delalloc)
3005 fill_delalloc = false;
3006
d1310b2e
CM
3007 delalloc_start = start;
3008 delalloc_end = 0;
c8b97818 3009 page_started = 0;
9e487107 3010 if (!epd->extent_locked && fill_delalloc) {
f85d7d6c 3011 u64 delalloc_to_write = 0;
11c8349b
CM
3012 /*
3013 * make sure the wbc mapping index is at least updated
3014 * to this page.
3015 */
3016 update_nr_written(page, wbc, 0);
3017
d397712b 3018 while (delalloc_end < page_end) {
771ed689 3019 nr_delalloc = find_lock_delalloc_range(inode, tree,
c8b97818
CM
3020 page,
3021 &delalloc_start,
d1310b2e
CM
3022 &delalloc_end,
3023 128 * 1024 * 1024);
771ed689
CM
3024 if (nr_delalloc == 0) {
3025 delalloc_start = delalloc_end + 1;
3026 continue;
3027 }
013bd4c3
TI
3028 ret = tree->ops->fill_delalloc(inode, page,
3029 delalloc_start,
3030 delalloc_end,
3031 &page_started,
3032 &nr_written);
79787eaa
JM
3033 /* File system has been set read-only */
3034 if (ret) {
3035 SetPageError(page);
3036 goto done;
3037 }
f85d7d6c
CM
3038 /*
3039 * delalloc_end is already one less than the total
3040 * length, so we don't subtract one from
3041 * PAGE_CACHE_SIZE
3042 */
3043 delalloc_to_write += (delalloc_end - delalloc_start +
3044 PAGE_CACHE_SIZE) >>
3045 PAGE_CACHE_SHIFT;
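			/*
			 * editorial example: a one-page delalloc range
			 * [0, 4095] gives (4095 - 0 + 4096) >>
			 * PAGE_CACHE_SHIFT == 1 page, as expected.
			 */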
d1310b2e 3046 delalloc_start = delalloc_end + 1;
d1310b2e 3047 }
f85d7d6c
CM
3048 if (wbc->nr_to_write < delalloc_to_write) {
3049 int thresh = 8192;
3050
3051 if (delalloc_to_write < thresh * 2)
3052 thresh = delalloc_to_write;
3053 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3054 thresh);
3055 }
c8b97818 3056
771ed689
CM
3057 /* did the fill delalloc function already unlock and start
3058 * the IO?
3059 */
3060 if (page_started) {
3061 ret = 0;
11c8349b
CM
3062 /*
3063 * we've unlocked the page, so we can't update
3064 * the mapping's writeback index, just update
3065 * nr_to_write.
3066 */
3067 wbc->nr_to_write -= nr_written;
3068 goto done_unlocked;
771ed689 3069 }
c8b97818 3070 }
247e743c 3071 if (tree->ops && tree->ops->writepage_start_hook) {
c8b97818
CM
3072 ret = tree->ops->writepage_start_hook(page, start,
3073 page_end);
87826df0
JM
3074 if (ret) {
3075 /* Fixup worker will requeue */
3076 if (ret == -EBUSY)
3077 wbc->pages_skipped++;
3078 else
3079 redirty_page_for_writepage(wbc, page);
11c8349b 3080 update_nr_written(page, wbc, nr_written);
247e743c 3081 unlock_page(page);
771ed689 3082 ret = 0;
11c8349b 3083 goto done_unlocked;
247e743c
CM
3084 }
3085 }
3086
11c8349b
CM
3087 /*
3088 * we don't want to touch the inode after unlocking the page,
3089 * so we update the mapping writeback index now
3090 */
3091 update_nr_written(page, wbc, nr_written + 1);
771ed689 3092
d1310b2e 3093 end = page_end;
d1310b2e 3094 if (last_byte <= start) {
e6dcd2dc
CM
3095 if (tree->ops && tree->ops->writepage_end_io_hook)
3096 tree->ops->writepage_end_io_hook(page, start,
3097 page_end, NULL, 1);
d1310b2e
CM
3098 goto done;
3099 }
3100
d1310b2e
CM
3101 blocksize = inode->i_sb->s_blocksize;
3102
3103 while (cur <= end) {
3104 if (cur >= last_byte) {
e6dcd2dc
CM
3105 if (tree->ops && tree->ops->writepage_end_io_hook)
3106 tree->ops->writepage_end_io_hook(page, cur,
3107 page_end, NULL, 1);
d1310b2e
CM
3108 break;
3109 }
7f3c74fb 3110 em = epd->get_extent(inode, page, pg_offset, cur,
d1310b2e 3111 end - cur + 1, 1);
c704005d 3112 if (IS_ERR_OR_NULL(em)) {
d1310b2e
CM
3113 SetPageError(page);
3114 break;
3115 }
3116
3117 extent_offset = cur - em->start;
3118 BUG_ON(extent_map_end(em) <= cur);
3119 BUG_ON(end < cur);
3120 iosize = min(extent_map_end(em) - cur, end - cur + 1);
fda2832f 3121 iosize = ALIGN(iosize, blocksize);
d1310b2e
CM
3122 sector = (em->block_start + extent_offset) >> 9;
3123 bdev = em->bdev;
3124 block_start = em->block_start;
c8b97818 3125 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
d1310b2e
CM
3126 free_extent_map(em);
3127 em = NULL;
3128
c8b97818
CM
3129 /*
3130 * compressed and inline extents are written through other
3131 * paths in the FS
3132 */
3133 if (compressed || block_start == EXTENT_MAP_HOLE ||
d1310b2e 3134 block_start == EXTENT_MAP_INLINE) {
c8b97818
CM
3135 /*
3136 * end_io notification does not happen here for
3137 * compressed extents
3138 */
3139 if (!compressed && tree->ops &&
3140 tree->ops->writepage_end_io_hook)
e6dcd2dc
CM
3141 tree->ops->writepage_end_io_hook(page, cur,
3142 cur + iosize - 1,
3143 NULL, 1);
c8b97818
CM
3144 else if (compressed) {
3145 /* we don't want to end_page_writeback on
3146 * a compressed extent. this happens
3147 * elsewhere
3148 */
3149 nr++;
3150 }
3151
3152 cur += iosize;
7f3c74fb 3153 pg_offset += iosize;
d1310b2e
CM
3154 continue;
3155 }
d1310b2e
CM
3156 /* leave this out until we have a page_mkwrite call */
3157 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
9655d298 3158 EXTENT_DIRTY, 0, NULL)) {
d1310b2e 3159 cur = cur + iosize;
7f3c74fb 3160 pg_offset += iosize;
d1310b2e
CM
3161 continue;
3162 }
c8b97818 3163
d1310b2e
CM
3164 if (tree->ops && tree->ops->writepage_io_hook) {
3165 ret = tree->ops->writepage_io_hook(page, cur,
3166 cur + iosize - 1);
3167 } else {
3168 ret = 0;
3169 }
1259ab75 3170 if (ret) {
d1310b2e 3171 SetPageError(page);
1259ab75 3172 } else {
d1310b2e 3173 unsigned long max_nr = end_index + 1;
7f3c74fb 3174
d1310b2e
CM
3175 set_range_writeback(tree, cur, cur + iosize - 1);
3176 if (!PageWriteback(page)) {
d397712b
CM
3177 printk(KERN_ERR "btrfs warning page %lu not "
3178 "writeback, cur %llu end %llu\n",
3179 page->index, (unsigned long long)cur,
d1310b2e
CM
3180 (unsigned long long)end);
3181 }
3182
ffbd517d
CM
3183 ret = submit_extent_page(write_flags, tree, page,
3184 sector, iosize, pg_offset,
3185 bdev, &epd->bio, max_nr,
c8b97818
CM
3186 end_bio_extent_writepage,
3187 0, 0, 0);
d1310b2e
CM
3188 if (ret)
3189 SetPageError(page);
3190 }
3191 cur = cur + iosize;
7f3c74fb 3192 pg_offset += iosize;
d1310b2e
CM
3193 nr++;
3194 }
3195done:
3196 if (nr == 0) {
3197 /* make sure the mapping tag for page dirty gets cleared */
3198 set_page_writeback(page);
3199 end_page_writeback(page);
3200 }
d1310b2e 3201 unlock_page(page);
771ed689 3202
11c8349b
CM
3203done_unlocked:
3204
2c64c53d
CM
3205 /* drop our reference on any cached states */
3206 free_extent_state(cached_state);
d1310b2e
CM
3207 return 0;
3208}
3209
0b32f4bb
JB
3210static int eb_wait(void *word)
3211{
3212 io_schedule();
3213 return 0;
3214}
3215
fd8b2b61 3216void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
0b32f4bb
JB
3217{
3218 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3219 TASK_UNINTERRUPTIBLE);
3220}
3221
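/*
 * editorial note: returns 1 if the buffer was dirty and has now been
 * marked for writeback with all of its pages locked, 0 if there was
 * nothing to write.  May drop and retake the tree lock, and may flush
 * the pending bio, along the way.
 */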
3222static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3223 struct btrfs_fs_info *fs_info,
3224 struct extent_page_data *epd)
3225{
3226 unsigned long i, num_pages;
3227 int flush = 0;
3228 int ret = 0;
3229
3230 if (!btrfs_try_tree_write_lock(eb)) {
3231 flush = 1;
3232 flush_write_bio(epd);
3233 btrfs_tree_lock(eb);
3234 }
3235
3236 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3237 btrfs_tree_unlock(eb);
3238 if (!epd->sync_io)
3239 return 0;
3240 if (!flush) {
3241 flush_write_bio(epd);
3242 flush = 1;
3243 }
a098d8e8
CM
3244 while (1) {
3245 wait_on_extent_buffer_writeback(eb);
3246 btrfs_tree_lock(eb);
3247 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3248 break;
0b32f4bb 3249 btrfs_tree_unlock(eb);
0b32f4bb
JB
3250 }
3251 }
3252
51561ffe
JB
3253 /*
3254 * We need to do this to prevent races in people who check if the eb is
3255 * under IO since we can end up having no IO bits set for a short period
3256 * of time.
3257 */
3258 spin_lock(&eb->refs_lock);
0b32f4bb
JB
3259 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3260 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
51561ffe 3261 spin_unlock(&eb->refs_lock);
0b32f4bb 3262 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
e2d84521
MX
3263 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3264 -eb->len,
3265 fs_info->dirty_metadata_batch);
0b32f4bb 3266 ret = 1;
51561ffe
JB
3267 } else {
3268 spin_unlock(&eb->refs_lock);
0b32f4bb
JB
3269 }
3270
3271 btrfs_tree_unlock(eb);
3272
3273 if (!ret)
3274 return ret;
3275
3276 num_pages = num_extent_pages(eb->start, eb->len);
3277 for (i = 0; i < num_pages; i++) {
3278 struct page *p = extent_buffer_page(eb, i);
3279
3280 if (!trylock_page(p)) {
3281 if (!flush) {
3282 flush_write_bio(epd);
3283 flush = 1;
3284 }
3285 lock_page(p);
3286 }
3287 }
3288
3289 return ret;
3290}
3291
3292static void end_extent_buffer_writeback(struct extent_buffer *eb)
3293{
3294 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3295 smp_mb__after_clear_bit();
3296 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3297}
3298
3299static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3300{
3301 int uptodate = err == 0;
3302 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3303 struct extent_buffer *eb;
3304 int done;
3305
3306 do {
3307 struct page *page = bvec->bv_page;
3308
3309 bvec--;
3310 eb = (struct extent_buffer *)page->private;
3311 BUG_ON(!eb);
3312 done = atomic_dec_and_test(&eb->io_pages);
3313
3314 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3315 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3316 ClearPageUptodate(page);
3317 SetPageError(page);
3318 }
3319
3320 end_page_writeback(page);
3321
3322 if (!done)
3323 continue;
3324
3325 end_extent_buffer_writeback(eb);
3326 } while (bvec >= bio->bi_io_vec);
3327
3328 bio_put(bio);
3329
3330}
3331
3332static int write_one_eb(struct extent_buffer *eb,
3333 struct btrfs_fs_info *fs_info,
3334 struct writeback_control *wbc,
3335 struct extent_page_data *epd)
3336{
3337 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3338 u64 offset = eb->start;
3339 unsigned long i, num_pages;
de0022b9 3340 unsigned long bio_flags = 0;
d4c7ca86 3341 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
d7dbe9e7 3342 int ret = 0;
0b32f4bb
JB
3343
3344 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3345 num_pages = num_extent_pages(eb->start, eb->len);
3346 atomic_set(&eb->io_pages, num_pages);
de0022b9
JB
3347 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3348 bio_flags = EXTENT_BIO_TREE_LOG;
3349
0b32f4bb
JB
3350 for (i = 0; i < num_pages; i++) {
3351 struct page *p = extent_buffer_page(eb, i);
3352
3353 clear_page_dirty_for_io(p);
3354 set_page_writeback(p);
3355 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3356 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3357 -1, end_bio_extent_buffer_writepage,
de0022b9
JB
3358 0, epd->bio_flags, bio_flags);
3359 epd->bio_flags = bio_flags;
0b32f4bb
JB
3360 if (ret) {
3361 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3362 SetPageError(p);
3363 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3364 end_extent_buffer_writeback(eb);
3365 ret = -EIO;
3366 break;
3367 }
3368 offset += PAGE_CACHE_SIZE;
3369 update_nr_written(p, wbc, 1);
3370 unlock_page(p);
3371 }
3372
3373 if (unlikely(ret)) {
3374 for (; i < num_pages; i++) {
3375 struct page *p = extent_buffer_page(eb, i);
3376 unlock_page(p);
3377 }
3378 }
3379
3380 return ret;
3381}
3382
3383int btree_write_cache_pages(struct address_space *mapping,
3384 struct writeback_control *wbc)
3385{
3386 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3387 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3388 struct extent_buffer *eb, *prev_eb = NULL;
3389 struct extent_page_data epd = {
3390 .bio = NULL,
3391 .tree = tree,
3392 .extent_locked = 0,
3393 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
de0022b9 3394 .bio_flags = 0,
0b32f4bb
JB
3395 };
3396 int ret = 0;
3397 int done = 0;
3398 int nr_to_write_done = 0;
3399 struct pagevec pvec;
3400 int nr_pages;
3401 pgoff_t index;
3402 pgoff_t end; /* Inclusive */
3403 int scanned = 0;
3404 int tag;
3405
3406 pagevec_init(&pvec, 0);
3407 if (wbc->range_cyclic) {
3408 index = mapping->writeback_index; /* Start from prev offset */
3409 end = -1;
3410 } else {
3411 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3412 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3413 scanned = 1;
3414 }
3415 if (wbc->sync_mode == WB_SYNC_ALL)
3416 tag = PAGECACHE_TAG_TOWRITE;
3417 else
3418 tag = PAGECACHE_TAG_DIRTY;
3419retry:
3420 if (wbc->sync_mode == WB_SYNC_ALL)
3421 tag_pages_for_writeback(mapping, index, end);
3422 while (!done && !nr_to_write_done && (index <= end) &&
3423 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3424 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3425 unsigned i;
3426
3427 scanned = 1;
3428 for (i = 0; i < nr_pages; i++) {
3429 struct page *page = pvec.pages[i];
3430
3431 if (!PagePrivate(page))
3432 continue;
3433
3434 if (!wbc->range_cyclic && page->index > end) {
3435 done = 1;
3436 break;
3437 }
3438
b5bae261
JB
3439 spin_lock(&mapping->private_lock);
3440 if (!PagePrivate(page)) {
3441 spin_unlock(&mapping->private_lock);
3442 continue;
3443 }
3444
0b32f4bb 3445 eb = (struct extent_buffer *)page->private;
b5bae261
JB
3446
3447 /*
3448 * Shouldn't happen and normally this would be a BUG_ON
3449 * but no sense in crashing the user's box for something
3450 * we can survive anyway.
3451 */
0b32f4bb 3452 if (!eb) {
b5bae261 3453 spin_unlock(&mapping->private_lock);
0b32f4bb
JB
3454 WARN_ON(1);
3455 continue;
3456 }
3457
b5bae261
JB
3458 if (eb == prev_eb) {
3459 spin_unlock(&mapping->private_lock);
0b32f4bb 3460 continue;
b5bae261 3461 }
0b32f4bb 3462
b5bae261
JB
3463 ret = atomic_inc_not_zero(&eb->refs);
3464 spin_unlock(&mapping->private_lock);
3465 if (!ret)
0b32f4bb 3466 continue;
0b32f4bb
JB
3467
3468 prev_eb = eb;
3469 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3470 if (!ret) {
3471 free_extent_buffer(eb);
3472 continue;
3473 }
3474
3475 ret = write_one_eb(eb, fs_info, wbc, &epd);
3476 if (ret) {
3477 done = 1;
3478 free_extent_buffer(eb);
3479 break;
3480 }
3481 free_extent_buffer(eb);
3482
3483 /*
3484 * the filesystem may choose to bump up nr_to_write.
3485 * We have to make sure to honor the new nr_to_write
3486 * at any time
3487 */
3488 nr_to_write_done = wbc->nr_to_write <= 0;
3489 }
3490 pagevec_release(&pvec);
3491 cond_resched();
3492 }
3493 if (!scanned && !done) {
3494 /*
3495 * We hit the last page and there is more work to be done: wrap
3496 * back to the start of the file
3497 */
3498 scanned = 1;
3499 index = 0;
3500 goto retry;
3501 }
3502 flush_write_bio(&epd);
3503 return ret;
3504}
3505
d1310b2e 3506/**
4bef0848 3507 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
d1310b2e
CM
3508 * @mapping: address space structure to write
3509 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3510 * @writepage: function called for each page
3511 * @data: data passed to writepage function
3512 *
3513 * If a page is already under I/O, write_cache_pages() skips it, even
3514 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3515 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3516 * and msync() need to guarantee that all the data which was dirty at the time
3517 * the call was made get new I/O started against them. If wbc->sync_mode is
3518 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3519 * existing IO to complete.
3520 */
b2950863 3521static int extent_write_cache_pages(struct extent_io_tree *tree,
4bef0848
CM
3522 struct address_space *mapping,
3523 struct writeback_control *wbc,
d2c3f4f6
CM
3524 writepage_t writepage, void *data,
3525 void (*flush_fn)(void *))
d1310b2e 3526{
7fd1a3f7 3527 struct inode *inode = mapping->host;
d1310b2e
CM
3528 int ret = 0;
3529 int done = 0;
f85d7d6c 3530 int nr_to_write_done = 0;
d1310b2e
CM
3531 struct pagevec pvec;
3532 int nr_pages;
3533 pgoff_t index;
3534 pgoff_t end; /* Inclusive */
3535 int scanned = 0;
f7aaa06b 3536 int tag;
d1310b2e 3537
7fd1a3f7
JB
3538 /*
3539 * We have to hold onto the inode so that ordered extents can do their
3540 * work when the IO finishes. The alternative to this is failing to add
3541 * an ordered extent if the igrab() fails there and that is a huge pain
3542 * to deal with, so instead just hold onto the inode throughout the
3543 * writepages operation. If it fails here we are freeing up the inode
3544 * anyway and we'd rather not waste our time writing out stuff that is
3545 * going to be truncated anyway.
3546 */
3547 if (!igrab(inode))
3548 return 0;
3549
d1310b2e
CM
3550 pagevec_init(&pvec, 0);
3551 if (wbc->range_cyclic) {
3552 index = mapping->writeback_index; /* Start from prev offset */
3553 end = -1;
3554 } else {
3555 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3556 end = wbc->range_end >> PAGE_CACHE_SHIFT;
d1310b2e
CM
3557 scanned = 1;
3558 }
f7aaa06b
JB
3559 if (wbc->sync_mode == WB_SYNC_ALL)
3560 tag = PAGECACHE_TAG_TOWRITE;
3561 else
3562 tag = PAGECACHE_TAG_DIRTY;
d1310b2e 3563retry:
f7aaa06b
JB
3564 if (wbc->sync_mode == WB_SYNC_ALL)
3565 tag_pages_for_writeback(mapping, index, end);
f85d7d6c 3566 while (!done && !nr_to_write_done && (index <= end) &&
f7aaa06b
JB
3567 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3568 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
d1310b2e
CM
3569 unsigned i;
3570
3571 scanned = 1;
3572 for (i = 0; i < nr_pages; i++) {
3573 struct page *page = pvec.pages[i];
3574
3575 /*
3576 * At this point we hold neither mapping->tree_lock nor
3577 * lock on the page itself: the page may be truncated or
3578 * invalidated (changing page->mapping to NULL), or even
3579 * swizzled back from swapper_space to tmpfs file
3580 * mapping
3581 */
c8f2f24b
JB
3582 if (!trylock_page(page)) {
3583 flush_fn(data);
3584 lock_page(page);
01d658f2 3585 }
d1310b2e
CM
3586
3587 if (unlikely(page->mapping != mapping)) {
3588 unlock_page(page);
3589 continue;
3590 }
3591
3592 if (!wbc->range_cyclic && page->index > end) {
3593 done = 1;
3594 unlock_page(page);
3595 continue;
3596 }
3597
d2c3f4f6 3598 if (wbc->sync_mode != WB_SYNC_NONE) {
0e6bd956
CM
3599 if (PageWriteback(page))
3600 flush_fn(data);
d1310b2e 3601 wait_on_page_writeback(page);
d2c3f4f6 3602 }
d1310b2e
CM
3603
3604 if (PageWriteback(page) ||
3605 !clear_page_dirty_for_io(page)) {
3606 unlock_page(page);
3607 continue;
3608 }
3609
3610 ret = (*writepage)(page, wbc, data);
3611
3612 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3613 unlock_page(page);
3614 ret = 0;
3615 }
f85d7d6c 3616 if (ret)
d1310b2e 3617 done = 1;
f85d7d6c
CM
3618
3619 /*
3620 * the filesystem may choose to bump up nr_to_write.
3621 * We have to make sure to honor the new nr_to_write
3622 * at any time
3623 */
3624 nr_to_write_done = wbc->nr_to_write <= 0;
d1310b2e
CM
3625 }
3626 pagevec_release(&pvec);
3627 cond_resched();
3628 }
3629 if (!scanned && !done) {
3630 /*
3631 * We hit the last page and there is more work to be done: wrap
3632 * back to the start of the file
3633 */
3634 scanned = 1;
3635 index = 0;
3636 goto retry;
3637 }
7fd1a3f7 3638 btrfs_add_delayed_iput(inode);
d1310b2e
CM
3639 return ret;
3640}
d1310b2e 3641
ffbd517d 3642static void flush_epd_write_bio(struct extent_page_data *epd)
d2c3f4f6 3643{
d2c3f4f6 3644 if (epd->bio) {
355808c2
JM
3645 int rw = WRITE;
3646 int ret;
3647
ffbd517d 3648 if (epd->sync_io)
355808c2
JM
3649 rw = WRITE_SYNC;
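/*
 * WRITE_SYNC marks the bio REQ_SYNC, telling the block layer this is
 * latency-sensitive foreground writeback rather than background flushing.
 */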
3650
de0022b9 3651 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
79787eaa 3652 BUG_ON(ret < 0); /* -ENOMEM */
d2c3f4f6
CM
3653 epd->bio = NULL;
3654 }
3655}
3656
ffbd517d
CM
3657static noinline void flush_write_bio(void *data)
3658{
3659 struct extent_page_data *epd = data;
3660 flush_epd_write_bio(epd);
3661}
3662
d1310b2e
CM
3663int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3664 get_extent_t *get_extent,
3665 struct writeback_control *wbc)
3666{
3667 int ret;
d1310b2e
CM
3668 struct extent_page_data epd = {
3669 .bio = NULL,
3670 .tree = tree,
3671 .get_extent = get_extent,
771ed689 3672 .extent_locked = 0,
ffbd517d 3673 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
de0022b9 3674 .bio_flags = 0,
d1310b2e 3675 };
d1310b2e 3676
d1310b2e
CM
3677 ret = __extent_writepage(page, wbc, &epd);
3678
ffbd517d 3679 flush_epd_write_bio(&epd);
d1310b2e
CM
3680 return ret;
3681}
d1310b2e 3682
771ed689
CM
3683int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3684 u64 start, u64 end, get_extent_t *get_extent,
3685 int mode)
3686{
3687 int ret = 0;
3688 struct address_space *mapping = inode->i_mapping;
3689 struct page *page;
3690 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3691 PAGE_CACHE_SHIFT;
3692
3693 struct extent_page_data epd = {
3694 .bio = NULL,
3695 .tree = tree,
3696 .get_extent = get_extent,
3697 .extent_locked = 1,
ffbd517d 3698 .sync_io = mode == WB_SYNC_ALL,
de0022b9 3699 .bio_flags = 0,
771ed689
CM
3700 };
3701 struct writeback_control wbc_writepages = {
771ed689 3702 .sync_mode = mode,
771ed689
CM
3703 .nr_to_write = nr_pages * 2,
3704 .range_start = start,
3705 .range_end = end + 1,
3706 };
3707
d397712b 3708 while (start <= end) {
771ed689
CM
3709 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3710 if (clear_page_dirty_for_io(page))
3711 ret = __extent_writepage(page, &wbc_writepages, &epd);
3712 else {
3713 if (tree->ops && tree->ops->writepage_end_io_hook)
3714 tree->ops->writepage_end_io_hook(page, start,
3715 start + PAGE_CACHE_SIZE - 1,
3716 NULL, 1);
3717 unlock_page(page);
3718 }
3719 page_cache_release(page);
3720 start += PAGE_CACHE_SIZE;
3721 }
3722
ffbd517d 3723 flush_epd_write_bio(&epd);
771ed689
CM
3724 return ret;
3725}
d1310b2e
CM
3726
3727int extent_writepages(struct extent_io_tree *tree,
3728 struct address_space *mapping,
3729 get_extent_t *get_extent,
3730 struct writeback_control *wbc)
3731{
3732 int ret = 0;
3733 struct extent_page_data epd = {
3734 .bio = NULL,
3735 .tree = tree,
3736 .get_extent = get_extent,
771ed689 3737 .extent_locked = 0,
ffbd517d 3738 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
de0022b9 3739 .bio_flags = 0,
d1310b2e
CM
3740 };
3741
4bef0848 3742 ret = extent_write_cache_pages(tree, mapping, wbc,
d2c3f4f6
CM
3743 __extent_writepage, &epd,
3744 flush_write_bio);
ffbd517d 3745 flush_epd_write_bio(&epd);
d1310b2e
CM
3746 return ret;
3747}
d1310b2e
CM
3748
3749int extent_readpages(struct extent_io_tree *tree,
3750 struct address_space *mapping,
3751 struct list_head *pages, unsigned nr_pages,
3752 get_extent_t get_extent)
3753{
3754 struct bio *bio = NULL;
3755 unsigned page_idx;
c8b97818 3756 unsigned long bio_flags = 0;
67c9684f
LB
3757 struct page *pagepool[16];
3758 struct page *page;
3759 int i = 0;
3760 int nr = 0;
d1310b2e 3761
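/*
 * The readahead code builds the pages list in reverse index order, so
 * pulling from pages->prev walks the file forwards; batching through
 * pagepool[] lets adjacent pages share bios in __extent_read_full_page().
 */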
d1310b2e 3762 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
67c9684f 3763 page = list_entry(pages->prev, struct page, lru);
d1310b2e
CM
3764
3765 prefetchw(&page->flags);
3766 list_del(&page->lru);
67c9684f 3767 if (add_to_page_cache_lru(page, mapping,
43e817a1 3768 page->index, GFP_NOFS)) {
67c9684f
LB
3769 page_cache_release(page);
3770 continue;
d1310b2e 3771 }
67c9684f
LB
3772
3773 pagepool[nr++] = page;
3774 if (nr < ARRAY_SIZE(pagepool))
3775 continue;
3776 for (i = 0; i < nr; i++) {
3777 __extent_read_full_page(tree, pagepool[i], get_extent,
d4c7ca86 3778 &bio, 0, &bio_flags, READ);
67c9684f
LB
3779 page_cache_release(pagepool[i]);
3780 }
3781 nr = 0;
d1310b2e 3782 }
67c9684f
LB
3783 for (i = 0; i < nr; i++) {
3784 __extent_read_full_page(tree, pagepool[i], get_extent,
d4c7ca86 3785 &bio, 0, &bio_flags, READ);
67c9684f 3786 page_cache_release(pagepool[i]);
d1310b2e 3787 }
67c9684f 3788
d1310b2e
CM
3789 BUG_ON(!list_empty(pages));
3790 if (bio)
79787eaa 3791 return submit_one_bio(READ, bio, 0, bio_flags);
d1310b2e
CM
3792 return 0;
3793}
d1310b2e
CM
3794
3795/*
3796 * basic invalidatepage code, this waits on any locked or writeback
3797 * ranges corresponding to the page, and then deletes any extent state
3798 * records from the tree
3799 */
3800int extent_invalidatepage(struct extent_io_tree *tree,
3801 struct page *page, unsigned long offset)
3802{
2ac55d41 3803 struct extent_state *cached_state = NULL;
4eee4fa4 3804 u64 start = page_offset(page);
d1310b2e
CM
3805 u64 end = start + PAGE_CACHE_SIZE - 1;
3806 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3807
fda2832f 3808 start += ALIGN(offset, blocksize);
d1310b2e
CM
3809 if (start > end)
3810 return 0;
3811
d0082371 3812 lock_extent_bits(tree, start, end, 0, &cached_state);
1edbb734 3813 wait_on_page_writeback(page);
d1310b2e 3814 clear_extent_bit(tree, start, end,
32c00aff
JB
3815 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3816 EXTENT_DO_ACCOUNTING,
2ac55d41 3817 1, 1, &cached_state, GFP_NOFS);
d1310b2e
CM
3818 return 0;
3819}
d1310b2e 3820
7b13b7b1
CM
3821/*
3822 * a helper for releasepage, this tests for areas of the page that
3823 * are locked or under IO and drops the related state bits if it is safe
3824 * to drop the page.
3825 */
48a3b636
ES
3826static int try_release_extent_state(struct extent_map_tree *map,
3827 struct extent_io_tree *tree,
3828 struct page *page, gfp_t mask)
7b13b7b1 3829{
4eee4fa4 3830 u64 start = page_offset(page);
7b13b7b1
CM
3831 u64 end = start + PAGE_CACHE_SIZE - 1;
3832 int ret = 1;
3833
211f90e6 3834 if (test_range_bit(tree, start, end,
8b62b72b 3835 EXTENT_IOBITS, 0, NULL))
7b13b7b1
CM
3836 ret = 0;
3837 else {
3838 if ((mask & GFP_NOFS) == GFP_NOFS)
3839 mask = GFP_NOFS;
11ef160f
CM
3840 /*
3841 * at this point we can safely clear everything except the
3842 * locked bit and the nodatasum bit
3843 */
e3f24cc5 3844 ret = clear_extent_bit(tree, start, end,
11ef160f
CM
3845 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3846 0, 0, NULL, mask);
e3f24cc5
CM
3847
3848 /* if clear_extent_bit failed for enomem reasons,
3849 * we can't allow the release to continue.
3850 */
3851 if (ret < 0)
3852 ret = 0;
3853 else
3854 ret = 1;
7b13b7b1
CM
3855 }
3856 return ret;
3857}
7b13b7b1 3858
d1310b2e
CM
3859/*
3860 * a helper for releasepage. As long as there are no locked extents
3861 * in the range corresponding to the page, both state records and extent
3862 * map records are removed
3863 */
3864int try_release_extent_mapping(struct extent_map_tree *map,
70dec807
CM
3865 struct extent_io_tree *tree, struct page *page,
3866 gfp_t mask)
d1310b2e
CM
3867{
3868 struct extent_map *em;
4eee4fa4 3869 u64 start = page_offset(page);
d1310b2e 3870 u64 end = start + PAGE_CACHE_SIZE - 1;
7b13b7b1 3871
70dec807
CM
3872 if ((mask & __GFP_WAIT) &&
3873 page->mapping->host->i_size > 16 * 1024 * 1024) {
39b5637f 3874 u64 len;
70dec807 3875 while (start <= end) {
39b5637f 3876 len = end - start + 1;
890871be 3877 write_lock(&map->lock);
39b5637f 3878 em = lookup_extent_mapping(map, start, len);
285190d9 3879 if (!em) {
890871be 3880 write_unlock(&map->lock);
70dec807
CM
3881 break;
3882 }
7f3c74fb
CM
3883 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3884 em->start != start) {
890871be 3885 write_unlock(&map->lock);
70dec807
CM
3886 free_extent_map(em);
3887 break;
3888 }
3889 if (!test_range_bit(tree, em->start,
3890 extent_map_end(em) - 1,
8b62b72b 3891 EXTENT_LOCKED | EXTENT_WRITEBACK,
9655d298 3892 0, NULL)) {
70dec807
CM
3893 remove_extent_mapping(map, em);
3894 /* once for the rb tree */
3895 free_extent_map(em);
3896 }
3897 start = extent_map_end(em);
890871be 3898 write_unlock(&map->lock);
70dec807
CM
3899
3900 /* once for us */
d1310b2e
CM
3901 free_extent_map(em);
3902 }
d1310b2e 3903 }
7b13b7b1 3904 return try_release_extent_state(map, tree, page, mask);
d1310b2e 3905}
d1310b2e 3906
ec29ed5b
CM
3907/*
3908 * helper function for fiemap, which doesn't want to see any holes.
3909 * This maps until we find something past 'last'
3910 */
3911static struct extent_map *get_extent_skip_holes(struct inode *inode,
3912 u64 offset,
3913 u64 last,
3914 get_extent_t *get_extent)
3915{
3916 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3917 struct extent_map *em;
3918 u64 len;
3919
3920 if (offset >= last)
3921 return NULL;
3922
3923 while (1) {
3924 len = last - offset;
3925 if (len == 0)
3926 break;
fda2832f 3927 len = ALIGN(len, sectorsize);
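/*
 * e.g. with a 4k sectorsize, a residual len of 1 rounds up to 4096, so
 * get_extent() is always asked for at least one full sector.
 */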
ec29ed5b 3928 em = get_extent(inode, NULL, 0, offset, len, 0);
c704005d 3929 if (IS_ERR_OR_NULL(em))
ec29ed5b
CM
3930 return em;
3931
3932 /* if this isn't a hole return it */
3933 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3934 em->block_start != EXTENT_MAP_HOLE) {
3935 return em;
3936 }
3937
3938 /* this is a hole, advance to the next extent */
3939 offset = extent_map_end(em);
3940 free_extent_map(em);
3941 if (offset >= last)
3942 break;
3943 }
3944 return NULL;
3945}
3946
1506fcc8
YS
3947int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3948 __u64 start, __u64 len, get_extent_t *get_extent)
3949{
975f84fe 3950 int ret = 0;
1506fcc8
YS
3951 u64 off = start;
3952 u64 max = start + len;
3953 u32 flags = 0;
975f84fe
JB
3954 u32 found_type;
3955 u64 last;
ec29ed5b 3956 u64 last_for_get_extent = 0;
1506fcc8 3957 u64 disko = 0;
ec29ed5b 3958 u64 isize = i_size_read(inode);
975f84fe 3959 struct btrfs_key found_key;
1506fcc8 3960 struct extent_map *em = NULL;
2ac55d41 3961 struct extent_state *cached_state = NULL;
975f84fe
JB
3962 struct btrfs_path *path;
3963 struct btrfs_file_extent_item *item;
1506fcc8 3964 int end = 0;
ec29ed5b
CM
3965 u64 em_start = 0;
3966 u64 em_len = 0;
3967 u64 em_end = 0;
1506fcc8 3968 unsigned long emflags;
1506fcc8
YS
3969
3970 if (len == 0)
3971 return -EINVAL;
3972
975f84fe
JB
3973 path = btrfs_alloc_path();
3974 if (!path)
3975 return -ENOMEM;
3976 path->leave_spinning = 1;
3977
4d479cf0
JB
3978 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3979 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3980
ec29ed5b
CM
3981 /*
3982 * lookup the last file extent. We're not using i_size here
3983 * because there might be preallocation past i_size
3984 */
975f84fe 3985 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
33345d01 3986 path, btrfs_ino(inode), -1, 0);
975f84fe
JB
3987 if (ret < 0) {
3988 btrfs_free_path(path);
3989 return ret;
3990 }
3991 WARN_ON(!ret);
3992 path->slots[0]--;
3993 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3994 struct btrfs_file_extent_item);
3995 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3996 found_type = btrfs_key_type(&found_key);
3997
ec29ed5b 3998 /* No extents, but there might be delalloc bits */
33345d01 3999 if (found_key.objectid != btrfs_ino(inode) ||
975f84fe 4000 found_type != BTRFS_EXTENT_DATA_KEY) {
ec29ed5b
CM
4001 /* have to trust i_size as the end */
4002 last = (u64)-1;
4003 last_for_get_extent = isize;
4004 } else {
4005 /*
4006 * remember the start of the last extent. There are a
4007 * bunch of different factors that go into the length of the
4008 * extent, so it's much less complex to remember where it started
4009 */
4010 last = found_key.offset;
4011 last_for_get_extent = last + 1;
975f84fe 4012 }
975f84fe
JB
4013 btrfs_free_path(path);
4014
ec29ed5b
CM
4015 /*
4016 * we might have some extents allocated but more delalloc past those
4017 * extents. so, we trust isize unless the start of the last extent is
4018 * beyond isize
4019 */
4020 if (last < isize) {
4021 last = (u64)-1;
4022 last_for_get_extent = isize;
4023 }
4024
2ac55d41 4025 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
d0082371 4026 &cached_state);
ec29ed5b 4027
4d479cf0 4028 em = get_extent_skip_holes(inode, start, last_for_get_extent,
ec29ed5b 4029 get_extent);
1506fcc8
YS
4030 if (!em)
4031 goto out;
4032 if (IS_ERR(em)) {
4033 ret = PTR_ERR(em);
4034 goto out;
4035 }
975f84fe 4036
1506fcc8 4037 while (!end) {
ea8efc74
CM
4038 u64 offset_in_extent;
4039
4040 /* break if the extent we found is outside the range */
4041 if (em->start >= max || extent_map_end(em) < off)
4042 break;
4043
4044 /*
4045 * get_extent may return an extent that starts before our
4046 * requested range. We have to make sure the ranges
4047 * we return to fiemap always move forward and don't
4048 * overlap, so adjust the offsets here
4049 */
4050 em_start = max(em->start, off);
1506fcc8 4051
ea8efc74
CM
4052 /*
4053 * record the offset from the start of the extent
4054 * for adjusting the disk offset below
4055 */
4056 offset_in_extent = em_start - em->start;
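/*
 * e.g. an extent with em->start == 0 looked up at off == 8192 yields
 * em_start = 8192 and offset_in_extent = 8192, so the disk offset
 * reported below becomes em->block_start + 8192.
 */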
ec29ed5b 4057 em_end = extent_map_end(em);
ea8efc74 4058 em_len = em_end - em_start;
ec29ed5b 4059 emflags = em->flags;
1506fcc8
YS
4060 disko = 0;
4061 flags = 0;
4062
ea8efc74
CM
4063 /*
4064 * bump off for our next call to get_extent
4065 */
4066 off = extent_map_end(em);
4067 if (off >= max)
4068 end = 1;
4069
93dbfad7 4070 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
1506fcc8
YS
4071 end = 1;
4072 flags |= FIEMAP_EXTENT_LAST;
93dbfad7 4073 } else if (em->block_start == EXTENT_MAP_INLINE) {
1506fcc8
YS
4074 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4075 FIEMAP_EXTENT_NOT_ALIGNED);
93dbfad7 4076 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
1506fcc8
YS
4077 flags |= (FIEMAP_EXTENT_DELALLOC |
4078 FIEMAP_EXTENT_UNKNOWN);
93dbfad7 4079 } else {
ea8efc74 4080 disko = em->block_start + offset_in_extent;
1506fcc8
YS
4081 }
4082 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4083 flags |= FIEMAP_EXTENT_ENCODED;
4084
1506fcc8
YS
4085 free_extent_map(em);
4086 em = NULL;
ec29ed5b
CM
4087 if ((em_start >= last) || em_len == (u64)-1 ||
4088 (last == (u64)-1 && isize <= em_end)) {
1506fcc8
YS
4089 flags |= FIEMAP_EXTENT_LAST;
4090 end = 1;
4091 }
4092
ec29ed5b
CM
4093 /* now scan forward to see if this is really the last extent. */
4094 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4095 get_extent);
4096 if (IS_ERR(em)) {
4097 ret = PTR_ERR(em);
4098 goto out;
4099 }
4100 if (!em) {
975f84fe
JB
4101 flags |= FIEMAP_EXTENT_LAST;
4102 end = 1;
4103 }
ec29ed5b
CM
4104 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4105 em_len, flags);
4106 if (ret)
4107 goto out_free;
1506fcc8
YS
4108 }
4109out_free:
4110 free_extent_map(em);
4111out:
2ac55d41
JB
4112 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
4113 &cached_state, GFP_NOFS);
1506fcc8
YS
4114 return ret;
4115}
4116
727011e0
CM
4117static void __free_extent_buffer(struct extent_buffer *eb)
4118{
6d49ba1b 4119 btrfs_leak_debug_del(&eb->leak_list);
727011e0
CM
4120 kmem_cache_free(extent_buffer_cache, eb);
4121}
4122
d1310b2e
CM
4123static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
4124 u64 start,
4125 unsigned long len,
4126 gfp_t mask)
4127{
4128 struct extent_buffer *eb = NULL;
4129
d1310b2e 4130 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
91ca338d
TI
4131 if (eb == NULL)
4132 return NULL;
d1310b2e
CM
4133 eb->start = start;
4134 eb->len = len;
4f2de97a 4135 eb->tree = tree;
815a51c7 4136 eb->bflags = 0;
bd681513
CM
4137 rwlock_init(&eb->lock);
4138 atomic_set(&eb->write_locks, 0);
4139 atomic_set(&eb->read_locks, 0);
4140 atomic_set(&eb->blocking_readers, 0);
4141 atomic_set(&eb->blocking_writers, 0);
4142 atomic_set(&eb->spinning_readers, 0);
4143 atomic_set(&eb->spinning_writers, 0);
5b25f70f 4144 eb->lock_nested = 0;
bd681513
CM
4145 init_waitqueue_head(&eb->write_lock_wq);
4146 init_waitqueue_head(&eb->read_lock_wq);
b4ce94de 4147
6d49ba1b
ES
4148 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4149
3083ee2e 4150 spin_lock_init(&eb->refs_lock);
d1310b2e 4151 atomic_set(&eb->refs, 1);
0b32f4bb 4152 atomic_set(&eb->io_pages, 0);
727011e0 4153
b8dae313
DS
4154 /*
4155 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4156 */
4157 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4158 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4159 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
d1310b2e
CM
4160
4161 return eb;
4162}
4163
815a51c7
JS
4164struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4165{
4166 unsigned long i;
4167 struct page *p;
4168 struct extent_buffer *new;
4169 unsigned long num_pages = num_extent_pages(src->start, src->len);
4170
4171 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
4172 if (new == NULL)
4173 return NULL;
4174
4175 for (i = 0; i < num_pages; i++) {
4176 p = alloc_page(GFP_ATOMIC);
4177 BUG_ON(!p);
4178 attach_extent_buffer_page(new, p);
4179 WARN_ON(PageDirty(p));
4180 SetPageUptodate(p);
4181 new->pages[i] = p;
4182 }
4183
4184 copy_extent_buffer(new, src, 0, 0, src->len);
4185 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4186 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4187
4188 return new;
4189}
4190
4191struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4192{
4193 struct extent_buffer *eb;
4194 unsigned long num_pages = num_extent_pages(0, len);
4195 unsigned long i;
4196
4197 eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4198 if (!eb)
4199 return NULL;
4200
4201 for (i = 0; i < num_pages; i++) {
4202 eb->pages[i] = alloc_page(GFP_ATOMIC);
4203 if (!eb->pages[i])
4204 goto err;
4205 }
4206 set_extent_buffer_uptodate(eb);
4207 btrfs_set_header_nritems(eb, 0);
4208 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4209
4210 return eb;
4211err:
84167d19
SB
4212 for (; i > 0; i--)
4213 __free_page(eb->pages[i - 1]);
815a51c7
JS
4214 __free_extent_buffer(eb);
4215 return NULL;
4216}
4217
0b32f4bb 4218static int extent_buffer_under_io(struct extent_buffer *eb)
d1310b2e 4219{
0b32f4bb
JB
4220 return (atomic_read(&eb->io_pages) ||
4221 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4222 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
d1310b2e
CM
4223}
4224
897ca6e9
MX
4225/*
4226 * Helper for releasing extent buffer page.
4227 */
4228static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4229 unsigned long start_idx)
4230{
4231 unsigned long index;
39bab87b 4232 unsigned long num_pages;
897ca6e9 4233 struct page *page;
815a51c7 4234 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
897ca6e9 4235
0b32f4bb 4236 BUG_ON(extent_buffer_under_io(eb));
897ca6e9 4237
39bab87b
WSH
4238 num_pages = num_extent_pages(eb->start, eb->len);
4239 index = start_idx + num_pages;
897ca6e9
MX
4240 if (start_idx >= index)
4241 return;
4242
4243 do {
4244 index--;
4245 page = extent_buffer_page(eb, index);
815a51c7 4246 if (page && mapped) {
4f2de97a
JB
4247 spin_lock(&page->mapping->private_lock);
4248 /*
4249 * We do this since we'll remove the pages after we've
4250 * removed the eb from the radix tree, so we could race
4251 * and have this page now attached to the new eb. So
4252 * only clear page_private if it's still connected to
4253 * this eb.
4254 */
4255 if (PagePrivate(page) &&
4256 page->private == (unsigned long)eb) {
0b32f4bb 4257 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3083ee2e
JB
4258 BUG_ON(PageDirty(page));
4259 BUG_ON(PageWriteback(page));
4f2de97a
JB
4260 /*
4261 * We need to make sure we haven't been attached
4262 * to a new eb.
4263 */
4264 ClearPagePrivate(page);
4265 set_page_private(page, 0);
4266 /* One for the page private */
4267 page_cache_release(page);
4268 }
4269 spin_unlock(&page->mapping->private_lock);
4270
815a51c7
JS
4271 }
4272 if (page) {
4f2de97a 4273 /* One for when we allocated the page */
897ca6e9 4274 page_cache_release(page);
4f2de97a 4275 }
897ca6e9
MX
4276 } while (index != start_idx);
4277}
4278
4279/*
4280 * Helper for releasing the extent buffer.
4281 */
4282static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4283{
4284 btrfs_release_extent_buffer_page(eb, 0);
4285 __free_extent_buffer(eb);
4286}
4287
0b32f4bb
JB
4288static void check_buffer_tree_ref(struct extent_buffer *eb)
4289{
242e18c7 4290 int refs;
0b32f4bb
JB
4291 /* the ref bit is tricky. We have to make sure it is set
4292 * if we have the buffer dirty. Otherwise the
4293 * code to free a buffer can end up dropping a dirty
4294 * page
4295 *
4296 * Once the ref bit is set, it won't go away while the
4297 * buffer is dirty or in writeback, and it also won't
4298 * go away while we have the reference count on the
4299 * eb bumped.
4300 *
4301 * We can't just set the ref bit without bumping the
4302 * ref on the eb because free_extent_buffer might
4303 * see the ref bit and try to clear it. If this happens
4304 * free_extent_buffer might end up dropping our original
4305 * ref by mistake and freeing the page before we are able
4306 * to add one more ref.
4307 *
4308 * So bump the ref count first, then set the bit. If someone
4309 * beat us to it, drop the ref we added.
4310 */
242e18c7
CM
4311 refs = atomic_read(&eb->refs);
4312 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4313 return;
4314
594831c4
JB
4315 spin_lock(&eb->refs_lock);
4316 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
0b32f4bb 4317 atomic_inc(&eb->refs);
594831c4 4318 spin_unlock(&eb->refs_lock);
0b32f4bb
JB
4319}
4320
5df4235e
JB
4321static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4322{
4323 unsigned long num_pages, i;
4324
0b32f4bb
JB
4325 check_buffer_tree_ref(eb);
4326
5df4235e
JB
4327 num_pages = num_extent_pages(eb->start, eb->len);
4328 for (i = 0; i < num_pages; i++) {
4329 struct page *p = extent_buffer_page(eb, i);
4330 mark_page_accessed(p);
4331 }
4332}
4333
d1310b2e 4334struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
727011e0 4335 u64 start, unsigned long len)
d1310b2e
CM
4336{
4337 unsigned long num_pages = num_extent_pages(start, len);
4338 unsigned long i;
4339 unsigned long index = start >> PAGE_CACHE_SHIFT;
4340 struct extent_buffer *eb;
6af118ce 4341 struct extent_buffer *exists = NULL;
d1310b2e
CM
4342 struct page *p;
4343 struct address_space *mapping = tree->mapping;
4344 int uptodate = 1;
19fe0a8b 4345 int ret;
d1310b2e 4346
19fe0a8b
MX
4347 rcu_read_lock();
4348 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4349 if (eb && atomic_inc_not_zero(&eb->refs)) {
4350 rcu_read_unlock();
5df4235e 4351 mark_extent_buffer_accessed(eb);
6af118ce
CM
4352 return eb;
4353 }
19fe0a8b 4354 rcu_read_unlock();
6af118ce 4355
ba144192 4356 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
2b114d1d 4357 if (!eb)
d1310b2e
CM
4358 return NULL;
4359
727011e0 4360 for (i = 0; i < num_pages; i++, index++) {
a6591715 4361 p = find_or_create_page(mapping, index, GFP_NOFS);
4804b382 4362 if (!p)
6af118ce 4363 goto free_eb;
4f2de97a
JB
4364
4365 spin_lock(&mapping->private_lock);
4366 if (PagePrivate(p)) {
4367 /*
4368 * We could have already allocated an eb for this page
4369 * and attached one so let's see if we can get a ref on
4370 * the existing eb, and if we can we know it's good and
4371 * we can just return that one, else we know we can just
4372 * overwrite page->private.
4373 */
4374 exists = (struct extent_buffer *)p->private;
4375 if (atomic_inc_not_zero(&exists->refs)) {
4376 spin_unlock(&mapping->private_lock);
4377 unlock_page(p);
17de39ac 4378 page_cache_release(p);
5df4235e 4379 mark_extent_buffer_accessed(exists);
4f2de97a
JB
4380 goto free_eb;
4381 }
4382
0b32f4bb 4383 /*
4f2de97a
JB
4384 * Do this so attach doesn't complain and we need to
4385 * drop the ref the old guy had.
4386 */
4387 ClearPagePrivate(p);
0b32f4bb 4388 WARN_ON(PageDirty(p));
4f2de97a 4389 page_cache_release(p);
d1310b2e 4390 }
4f2de97a
JB
4391 attach_extent_buffer_page(eb, p);
4392 spin_unlock(&mapping->private_lock);
0b32f4bb 4393 WARN_ON(PageDirty(p));
d1310b2e 4394 mark_page_accessed(p);
727011e0 4395 eb->pages[i] = p;
d1310b2e
CM
4396 if (!PageUptodate(p))
4397 uptodate = 0;
eb14ab8e
CM
4398
4399 /*
4400 * see below about how we avoid a nasty race with release page
4401 * and why we unlock later
4402 */
d1310b2e
CM
4403 }
4404 if (uptodate)
b4ce94de 4405 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
115391d2 4406again:
19fe0a8b
MX
4407 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4408 if (ret)
4409 goto free_eb;
4410
6af118ce 4411 spin_lock(&tree->buffer_lock);
19fe0a8b
MX
4412 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4413 if (ret == -EEXIST) {
4414 exists = radix_tree_lookup(&tree->buffer,
4415 start >> PAGE_CACHE_SHIFT);
115391d2
JB
4416 if (!atomic_inc_not_zero(&exists->refs)) {
4417 spin_unlock(&tree->buffer_lock);
4418 radix_tree_preload_end();
115391d2
JB
4419 exists = NULL;
4420 goto again;
4421 }
6af118ce 4422 spin_unlock(&tree->buffer_lock);
19fe0a8b 4423 radix_tree_preload_end();
5df4235e 4424 mark_extent_buffer_accessed(exists);
6af118ce
CM
4425 goto free_eb;
4426 }
6af118ce 4427 /* add one reference for the tree */
0b32f4bb 4428 check_buffer_tree_ref(eb);
f044ba78 4429 spin_unlock(&tree->buffer_lock);
19fe0a8b 4430 radix_tree_preload_end();
eb14ab8e
CM
4431
4432 /*
4433 * there is a race where release page may have
4434 * tried to find this extent buffer in the radix
4435 * reclaim the page, and it will clear the page private bit.
4436 * reclaim the, and it will clear the page private bit.
4437 * We must make sure to set the page private bit properly
4438 * after the extent buffer is in the radix tree so
4439 * it doesn't get lost
4440 */
727011e0
CM
4441 SetPageChecked(eb->pages[0]);
4442 for (i = 1; i < num_pages; i++) {
4443 p = extent_buffer_page(eb, i);
727011e0
CM
4444 ClearPageChecked(p);
4445 unlock_page(p);
4446 }
4447 unlock_page(eb->pages[0]);
d1310b2e
CM
4448 return eb;
4449
6af118ce 4450free_eb:
727011e0
CM
4451 for (i = 0; i < num_pages; i++) {
4452 if (eb->pages[i])
4453 unlock_page(eb->pages[i]);
4454 }
eb14ab8e 4455
17de39ac 4456 WARN_ON(!atomic_dec_and_test(&eb->refs));
897ca6e9 4457 btrfs_release_extent_buffer(eb);
6af118ce 4458 return exists;
d1310b2e 4459}
d1310b2e
CM
4460
4461struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
f09d1f60 4462 u64 start, unsigned long len)
d1310b2e 4463{
d1310b2e 4464 struct extent_buffer *eb;
d1310b2e 4465
19fe0a8b
MX
4466 rcu_read_lock();
4467 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4468 if (eb && atomic_inc_not_zero(&eb->refs)) {
4469 rcu_read_unlock();
5df4235e 4470 mark_extent_buffer_accessed(eb);
19fe0a8b
MX
4471 return eb;
4472 }
4473 rcu_read_unlock();
0f9dd46c 4474
19fe0a8b 4475 return NULL;
d1310b2e 4476}
d1310b2e 4477
3083ee2e
JB
4478static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4479{
4480 struct extent_buffer *eb =
4481 container_of(head, struct extent_buffer, rcu_head);
4482
4483 __free_extent_buffer(eb);
4484}
4485
3083ee2e 4486/* Expects to have eb->eb_lock already held */
f7a52a40 4487static int release_extent_buffer(struct extent_buffer *eb)
3083ee2e
JB
4488{
4489 WARN_ON(atomic_read(&eb->refs) == 0);
4490 if (atomic_dec_and_test(&eb->refs)) {
815a51c7
JS
4491 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4492 spin_unlock(&eb->refs_lock);
4493 } else {
4494 struct extent_io_tree *tree = eb->tree;
3083ee2e 4495
815a51c7 4496 spin_unlock(&eb->refs_lock);
3083ee2e 4497
815a51c7
JS
4498 spin_lock(&tree->buffer_lock);
4499 radix_tree_delete(&tree->buffer,
4500 eb->start >> PAGE_CACHE_SHIFT);
4501 spin_unlock(&tree->buffer_lock);
4502 }
3083ee2e
JB
4503
4504 /* Should be safe to release our pages at this point */
4505 btrfs_release_extent_buffer_page(eb, 0);
3083ee2e 4506 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
e64860aa 4507 return 1;
3083ee2e
JB
4508 }
4509 spin_unlock(&eb->refs_lock);
e64860aa
JB
4510
4511 return 0;
3083ee2e
JB
4512}
4513
d1310b2e
CM
4514void free_extent_buffer(struct extent_buffer *eb)
4515{
242e18c7
CM
4516 int refs;
4517 int old;
d1310b2e
CM
4518 if (!eb)
4519 return;
4520
242e18c7
CM
4521 while (1) {
4522 refs = atomic_read(&eb->refs);
4523 if (refs <= 3)
4524 break;
4525 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4526 if (old == refs)
4527 return;
4528 }
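/*
 * With more than three refs held, none of the "last refs" cases below
 * can apply, so the ref can be dropped with a bare cmpxchg; only at
 * refs <= 3 do we fall through and take refs_lock.
 */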
4529
3083ee2e 4530 spin_lock(&eb->refs_lock);
815a51c7
JS
4531 if (atomic_read(&eb->refs) == 2 &&
4532 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4533 atomic_dec(&eb->refs);
4534
3083ee2e
JB
4535 if (atomic_read(&eb->refs) == 2 &&
4536 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
0b32f4bb 4537 !extent_buffer_under_io(eb) &&
3083ee2e
JB
4538 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4539 atomic_dec(&eb->refs);
4540
4541 /*
4542 * I know this is terrible, but it's temporary until we stop tracking
4543 * the uptodate bits and such for the extent buffers.
4544 */
f7a52a40 4545 release_extent_buffer(eb);
3083ee2e
JB
4546}
4547
4548void free_extent_buffer_stale(struct extent_buffer *eb)
4549{
4550 if (!eb)
d1310b2e
CM
4551 return;
4552
3083ee2e
JB
4553 spin_lock(&eb->refs_lock);
4554 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4555
0b32f4bb 4556 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3083ee2e
JB
4557 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4558 atomic_dec(&eb->refs);
f7a52a40 4559 release_extent_buffer(eb);
d1310b2e 4560}
d1310b2e 4561
1d4284bd 4562void clear_extent_buffer_dirty(struct extent_buffer *eb)
d1310b2e 4563{
d1310b2e
CM
4564 unsigned long i;
4565 unsigned long num_pages;
4566 struct page *page;
4567
d1310b2e
CM
4568 num_pages = num_extent_pages(eb->start, eb->len);
4569
4570 for (i = 0; i < num_pages; i++) {
4571 page = extent_buffer_page(eb, i);
b9473439 4572 if (!PageDirty(page))
d2c3f4f6
CM
4573 continue;
4574
a61e6f29 4575 lock_page(page);
eb14ab8e
CM
4576 WARN_ON(!PagePrivate(page));
4577
d1310b2e 4578 clear_page_dirty_for_io(page);
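/*
 * The page won't be written back, so clear the radix tree dirty tag by
 * hand; otherwise writeback would still find the page by its tag.
 */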
0ee0fda0 4579 spin_lock_irq(&page->mapping->tree_lock);
d1310b2e
CM
4580 if (!PageDirty(page)) {
4581 radix_tree_tag_clear(&page->mapping->page_tree,
4582 page_index(page),
4583 PAGECACHE_TAG_DIRTY);
4584 }
0ee0fda0 4585 spin_unlock_irq(&page->mapping->tree_lock);
bf0da8c1 4586 ClearPageError(page);
a61e6f29 4587 unlock_page(page);
d1310b2e 4588 }
0b32f4bb 4589 WARN_ON(atomic_read(&eb->refs) == 0);
d1310b2e 4590}
d1310b2e 4591
0b32f4bb 4592int set_extent_buffer_dirty(struct extent_buffer *eb)
d1310b2e
CM
4593{
4594 unsigned long i;
4595 unsigned long num_pages;
b9473439 4596 int was_dirty = 0;
d1310b2e 4597
0b32f4bb
JB
4598 check_buffer_tree_ref(eb);
4599
b9473439 4600 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
0b32f4bb 4601
d1310b2e 4602 num_pages = num_extent_pages(eb->start, eb->len);
3083ee2e 4603 WARN_ON(atomic_read(&eb->refs) == 0);
0b32f4bb
JB
4604 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4605
b9473439 4606 for (i = 0; i < num_pages; i++)
0b32f4bb 4607 set_page_dirty(extent_buffer_page(eb, i));
b9473439 4608 return was_dirty;
d1310b2e 4609}
d1310b2e 4610
0b32f4bb 4611int clear_extent_buffer_uptodate(struct extent_buffer *eb)
1259ab75
CM
4612{
4613 unsigned long i;
4614 struct page *page;
4615 unsigned long num_pages;
4616
b4ce94de 4617 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
0b32f4bb 4618 num_pages = num_extent_pages(eb->start, eb->len);
1259ab75
CM
4619 for (i = 0; i < num_pages; i++) {
4620 page = extent_buffer_page(eb, i);
33958dc6
CM
4621 if (page)
4622 ClearPageUptodate(page);
1259ab75
CM
4623 }
4624 return 0;
4625}
4626
0b32f4bb 4627int set_extent_buffer_uptodate(struct extent_buffer *eb)
d1310b2e
CM
4628{
4629 unsigned long i;
4630 struct page *page;
4631 unsigned long num_pages;
4632
0b32f4bb 4633 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 4634 num_pages = num_extent_pages(eb->start, eb->len);
d1310b2e
CM
4635 for (i = 0; i < num_pages; i++) {
4636 page = extent_buffer_page(eb, i);
d1310b2e
CM
4637 SetPageUptodate(page);
4638 }
4639 return 0;
4640}
d1310b2e 4641
0b32f4bb 4642int extent_buffer_uptodate(struct extent_buffer *eb)
d1310b2e 4643{
0b32f4bb 4644 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 4645}
d1310b2e
CM
4646
4647int read_extent_buffer_pages(struct extent_io_tree *tree,
bb82ab88 4648 struct extent_buffer *eb, u64 start, int wait,
f188591e 4649 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
4650{
4651 unsigned long i;
4652 unsigned long start_i;
4653 struct page *page;
4654 int err;
4655 int ret = 0;
ce9adaa5
CM
4656 int locked_pages = 0;
4657 int all_uptodate = 1;
d1310b2e 4658 unsigned long num_pages;
727011e0 4659 unsigned long num_reads = 0;
a86c12c7 4660 struct bio *bio = NULL;
c8b97818 4661 unsigned long bio_flags = 0;
a86c12c7 4662
b4ce94de 4663 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
d1310b2e
CM
4664 return 0;
4665
d1310b2e
CM
4666 if (start) {
4667 WARN_ON(start < eb->start);
4668 start_i = (start >> PAGE_CACHE_SHIFT) -
4669 (eb->start >> PAGE_CACHE_SHIFT);
4670 } else {
4671 start_i = 0;
4672 }
4673
4674 num_pages = num_extent_pages(eb->start, eb->len);
4675 for (i = start_i; i < num_pages; i++) {
4676 page = extent_buffer_page(eb, i);
bb82ab88 4677 if (wait == WAIT_NONE) {
2db04966 4678 if (!trylock_page(page))
ce9adaa5 4679 goto unlock_exit;
d1310b2e
CM
4680 } else {
4681 lock_page(page);
4682 }
ce9adaa5 4683 locked_pages++;
727011e0
CM
4684 if (!PageUptodate(page)) {
4685 num_reads++;
ce9adaa5 4686 all_uptodate = 0;
727011e0 4687 }
ce9adaa5
CM
4688 }
4689 if (all_uptodate) {
4690 if (start_i == 0)
b4ce94de 4691 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ce9adaa5
CM
4692 goto unlock_exit;
4693 }
4694
ea466794 4695 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
5cf1ab56 4696 eb->read_mirror = 0;
0b32f4bb 4697 atomic_set(&eb->io_pages, num_reads);
ce9adaa5
CM
4698 for (i = start_i; i < num_pages; i++) {
4699 page = extent_buffer_page(eb, i);
ce9adaa5 4700 if (!PageUptodate(page)) {
f188591e 4701 ClearPageError(page);
a86c12c7 4702 err = __extent_read_full_page(tree, page,
f188591e 4703 get_extent, &bio,
d4c7ca86
JB
4704 mirror_num, &bio_flags,
4705 READ | REQ_META);
d397712b 4706 if (err)
d1310b2e 4707 ret = err;
d1310b2e
CM
4708 } else {
4709 unlock_page(page);
4710 }
4711 }
4712
355808c2 4713 if (bio) {
d4c7ca86
JB
4714 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
4715 bio_flags);
79787eaa
JM
4716 if (err)
4717 return err;
355808c2 4718 }
a86c12c7 4719
bb82ab88 4720 if (ret || wait != WAIT_COMPLETE)
d1310b2e 4721 return ret;
d397712b 4722
d1310b2e
CM
4723 for (i = start_i; i < num_pages; i++) {
4724 page = extent_buffer_page(eb, i);
4725 wait_on_page_locked(page);
d397712b 4726 if (!PageUptodate(page))
d1310b2e 4727 ret = -EIO;
d1310b2e 4728 }
d397712b 4729
d1310b2e 4730 return ret;
ce9adaa5
CM
4731
4732unlock_exit:
4733 i = start_i;
d397712b 4734 while (locked_pages > 0) {
ce9adaa5
CM
4735 page = extent_buffer_page(eb, i);
4736 i++;
4737 unlock_page(page);
4738 locked_pages--;
4739 }
4740 return ret;
d1310b2e 4741}
d1310b2e
CM
4742
4743void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4744 unsigned long start,
4745 unsigned long len)
4746{
4747 size_t cur;
4748 size_t offset;
4749 struct page *page;
4750 char *kaddr;
4751 char *dst = (char *)dstv;
4752 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4753 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
d1310b2e
CM
4754
4755 WARN_ON(start > eb->len);
4756 WARN_ON(start + len > eb->start + eb->len);
4757
4758 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
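/*
 * e.g. with 4k pages an eb at eb->start 0x11400 has start_offset 0x400,
 * so eb byte 0xd00 lives in page i = (0x400 + 0xd00) >> 12 = 1 at
 * offset (0x400 + 0xd00) & 0xfff = 0x100 within that page.
 */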
4759
d397712b 4760 while (len > 0) {
d1310b2e 4761 page = extent_buffer_page(eb, i);
d1310b2e
CM
4762
4763 cur = min(len, (PAGE_CACHE_SIZE - offset));
a6591715 4764 kaddr = page_address(page);
d1310b2e 4765 memcpy(dst, kaddr + offset, cur);
d1310b2e
CM
4766
4767 dst += cur;
4768 len -= cur;
4769 offset = 0;
4770 i++;
4771 }
4772}
d1310b2e
CM
4773
4774int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
a6591715 4775 unsigned long min_len, char **map,
d1310b2e 4776 unsigned long *map_start,
a6591715 4777 unsigned long *map_len)
d1310b2e
CM
4778{
4779 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4780 char *kaddr;
4781 struct page *p;
4782 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4783 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4784 unsigned long end_i = (start_offset + start + min_len - 1) >>
4785 PAGE_CACHE_SHIFT;
4786
4787 if (i != end_i)
4788 return -EINVAL;
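/*
 * A mapping is only handed out when the range fits in one page: with 4k
 * pages and a page-aligned eb, start 4090 with min_len 10 spans pages 0
 * and 1 and is rejected, while start 4096 maps cleanly into page 1.
 */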
4789
4790 if (i == 0) {
4791 offset = start_offset;
4792 *map_start = 0;
4793 } else {
4794 offset = 0;
4795 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4796 }
d397712b 4797
d1310b2e 4798 if (start + min_len > eb->len) {
31b1a2bd 4799 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
d397712b
CM
4800 "wanted %lu %lu\n", (unsigned long long)eb->start,
4801 eb->len, start, min_len);
85026533 4802 return -EINVAL;
d1310b2e
CM
4803 }
4804
4805 p = extent_buffer_page(eb, i);
a6591715 4806 kaddr = page_address(p);
d1310b2e
CM
4807 *map = kaddr + offset;
4808 *map_len = PAGE_CACHE_SIZE - offset;
4809 return 0;
4810}
d1310b2e 4811
d1310b2e
CM
4812int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4813 unsigned long start,
4814 unsigned long len)
4815{
4816 size_t cur;
4817 size_t offset;
4818 struct page *page;
4819 char *kaddr;
4820 char *ptr = (char *)ptrv;
4821 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4822 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4823 int ret = 0;
4824
4825 WARN_ON(start > eb->len);
4826 WARN_ON(start + len > eb->start + eb->len);
4827
4828 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4829
d397712b 4830 while (len > 0) {
d1310b2e 4831 page = extent_buffer_page(eb, i);
d1310b2e
CM
4832
4833 cur = min(len, (PAGE_CACHE_SIZE - offset));
4834
a6591715 4835 kaddr = page_address(page);
d1310b2e 4836 ret = memcmp(ptr, kaddr + offset, cur);
d1310b2e
CM
4837 if (ret)
4838 break;
4839
4840 ptr += cur;
4841 len -= cur;
4842 offset = 0;
4843 i++;
4844 }
4845 return ret;
4846}
d1310b2e
CM
4847
4848void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4849 unsigned long start, unsigned long len)
4850{
4851 size_t cur;
4852 size_t offset;
4853 struct page *page;
4854 char *kaddr;
4855 char *src = (char *)srcv;
4856 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4857 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4858
4859 WARN_ON(start > eb->len);
4860 WARN_ON(start + len > eb->start + eb->len);
4861
4862 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4863
d397712b 4864 while (len > 0) {
d1310b2e
CM
4865 page = extent_buffer_page(eb, i);
4866 WARN_ON(!PageUptodate(page));
4867
4868 cur = min(len, PAGE_CACHE_SIZE - offset);
a6591715 4869 kaddr = page_address(page);
d1310b2e 4870 memcpy(kaddr + offset, src, cur);
d1310b2e
CM
4871
4872 src += cur;
4873 len -= cur;
4874 offset = 0;
4875 i++;
4876 }
4877}
d1310b2e
CM
4878
4879void memset_extent_buffer(struct extent_buffer *eb, char c,
4880 unsigned long start, unsigned long len)
4881{
4882 size_t cur;
4883 size_t offset;
4884 struct page *page;
4885 char *kaddr;
4886 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4887 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4888
4889 WARN_ON(start > eb->len);
4890 WARN_ON(start + len > eb->start + eb->len);
4891
4892 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4893
d397712b 4894 while (len > 0) {
d1310b2e
CM
4895 page = extent_buffer_page(eb, i);
4896 WARN_ON(!PageUptodate(page));
4897
4898 cur = min(len, PAGE_CACHE_SIZE - offset);
a6591715 4899 kaddr = page_address(page);
d1310b2e 4900 memset(kaddr + offset, c, cur);
d1310b2e
CM
4901
4902 len -= cur;
4903 offset = 0;
4904 i++;
4905 }
4906}
d1310b2e
CM
4907
4908void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4909 unsigned long dst_offset, unsigned long src_offset,
4910 unsigned long len)
4911{
4912 u64 dst_len = dst->len;
4913 size_t cur;
4914 size_t offset;
4915 struct page *page;
4916 char *kaddr;
4917 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4918 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4919
4920 WARN_ON(src->len != dst_len);
4921
4922 offset = (start_offset + dst_offset) &
4923 ((unsigned long)PAGE_CACHE_SIZE - 1);
4924
d397712b 4925 while (len > 0) {
d1310b2e
CM
4926 page = extent_buffer_page(dst, i);
4927 WARN_ON(!PageUptodate(page));
4928
4929 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4930
a6591715 4931 kaddr = page_address(page);
d1310b2e 4932 read_extent_buffer(src, kaddr + offset, src_offset, cur);
d1310b2e
CM
4933
4934 src_offset += cur;
4935 len -= cur;
4936 offset = 0;
4937 i++;
4938 }
4939}
d1310b2e
CM
4940
4941static void move_pages(struct page *dst_page, struct page *src_page,
4942 unsigned long dst_off, unsigned long src_off,
4943 unsigned long len)
4944{
a6591715 4945 char *dst_kaddr = page_address(dst_page);
d1310b2e
CM
4946 if (dst_page == src_page) {
4947 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4948 } else {
a6591715 4949 char *src_kaddr = page_address(src_page);
d1310b2e
CM
4950 char *p = dst_kaddr + dst_off + len;
4951 char *s = src_kaddr + src_off + len;
4952
4953 while (len--)
4954 *--p = *--s;
d1310b2e 4955 }
d1310b2e
CM
4956}
4957
3387206f
ST
4958static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4959{
4960 unsigned long distance = (src > dst) ? src - dst : dst - src;
4961 return distance < len;
4962}
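/*
 * e.g. src 100, dst 150, len 80: the distance is 50 < 80, so the ranges
 * overlap and copy_pages() below must preserve memmove() semantics.
 */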
4963
d1310b2e
CM
4964static void copy_pages(struct page *dst_page, struct page *src_page,
4965 unsigned long dst_off, unsigned long src_off,
4966 unsigned long len)
4967{
a6591715 4968 char *dst_kaddr = page_address(dst_page);
d1310b2e 4969 char *src_kaddr;
727011e0 4970 int must_memmove = 0;
d1310b2e 4971
3387206f 4972 if (dst_page != src_page) {
a6591715 4973 src_kaddr = page_address(src_page);
3387206f 4974 } else {
d1310b2e 4975 src_kaddr = dst_kaddr;
727011e0
CM
4976 if (areas_overlap(src_off, dst_off, len))
4977 must_memmove = 1;
3387206f 4978 }
d1310b2e 4979
727011e0
CM
4980 if (must_memmove)
4981 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4982 else
4983 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
d1310b2e
CM
4984}
4985
4986void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4987 unsigned long src_offset, unsigned long len)
4988{
4989 size_t cur;
4990 size_t dst_off_in_page;
4991 size_t src_off_in_page;
4992 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4993 unsigned long dst_i;
4994 unsigned long src_i;
4995
4996 if (src_offset + len > dst->len) {
d397712b
CM
4997 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4998 "len %lu dst len %lu\n", src_offset, len, dst->len);
d1310b2e
CM
4999 BUG_ON(1);
5000 }
5001 if (dst_offset + len > dst->len) {
d397712b
CM
5002 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
5003 "len %lu dst len %lu\n", dst_offset, len, dst->len);
d1310b2e
CM
5004 BUG_ON(1);
5005 }
5006
d397712b 5007 while (len > 0) {
d1310b2e
CM
5008 dst_off_in_page = (start_offset + dst_offset) &
5009 ((unsigned long)PAGE_CACHE_SIZE - 1);
5010 src_off_in_page = (start_offset + src_offset) &
5011 ((unsigned long)PAGE_CACHE_SIZE - 1);
5012
5013 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5014 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5015
5016 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5017 src_off_in_page));
5018 cur = min_t(unsigned long, cur,
5019 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5020
5021 copy_pages(extent_buffer_page(dst, dst_i),
5022 extent_buffer_page(dst, src_i),
5023 dst_off_in_page, src_off_in_page, cur);
5024
5025 src_offset += cur;
5026 dst_offset += cur;
5027 len -= cur;
5028 }
5029}
d1310b2e
CM
5030
5031void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5032 unsigned long src_offset, unsigned long len)
5033{
5034 size_t cur;
5035 size_t dst_off_in_page;
5036 size_t src_off_in_page;
5037 unsigned long dst_end = dst_offset + len - 1;
5038 unsigned long src_end = src_offset + len - 1;
5039 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5040 unsigned long dst_i;
5041 unsigned long src_i;
5042
5043 if (src_offset + len > dst->len) {
d397712b
CM
5044 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
5045 "len %lu len %lu\n", src_offset, len, dst->len);
d1310b2e
CM
5046 BUG_ON(1);
5047 }
5048 if (dst_offset + len > dst->len) {
d397712b
CM
5049 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
5050 "len %lu len %lu\n", dst_offset, len, dst->len);
d1310b2e
CM
5051 BUG_ON(1);
5052 }
727011e0 5053 if (dst_offset < src_offset) {
d1310b2e
CM
5054 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5055 return;
5056 }
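/*
 * dst is at or past src, so walk both ranges from the tail and copy back
 * to front: every byte is read before it can be overwritten, which gives
 * memmove() semantics across page boundaries.
 */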
d397712b 5057 while (len > 0) {
d1310b2e
CM
5058 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5059 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5060
5061 dst_off_in_page = (start_offset + dst_end) &
5062 ((unsigned long)PAGE_CACHE_SIZE - 1);
5063 src_off_in_page = (start_offset + src_end) &
5064 ((unsigned long)PAGE_CACHE_SIZE - 1);
5065
5066 cur = min_t(unsigned long, len, src_off_in_page + 1);
5067 cur = min(cur, dst_off_in_page + 1);
5068 move_pages(extent_buffer_page(dst, dst_i),
5069 extent_buffer_page(dst, src_i),
5070 dst_off_in_page - cur + 1,
5071 src_off_in_page - cur + 1, cur);
5072
5073 dst_end -= cur;
5074 src_end -= cur;
5075 len -= cur;
5076 }
5077}
6af118ce 5078
f7a52a40 5079int try_release_extent_buffer(struct page *page)
19fe0a8b 5080{
6af118ce 5081 struct extent_buffer *eb;
6af118ce 5082
3083ee2e
JB
5083 /*
5084 * We need to make sure nobody is attaching this page to an eb right
5085 * now.
5086 */
5087 spin_lock(&page->mapping->private_lock);
5088 if (!PagePrivate(page)) {
5089 spin_unlock(&page->mapping->private_lock);
4f2de97a 5090 return 1;
45f49bce 5091 }
6af118ce 5092
3083ee2e
JB
5093 eb = (struct extent_buffer *)page->private;
5094 BUG_ON(!eb);
19fe0a8b
MX
5095
5096 /*
3083ee2e
JB
5097 * This is a little awful but should be ok, we need to make sure that
5098 * the eb doesn't disappear out from under us while we're looking at
5099 * this page.
19fe0a8b 5100 */
3083ee2e 5101 spin_lock(&eb->refs_lock);
0b32f4bb 5102 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
3083ee2e
JB
5103 spin_unlock(&eb->refs_lock);
5104 spin_unlock(&page->mapping->private_lock);
5105 return 0;
b9473439 5106 }
3083ee2e 5107 spin_unlock(&page->mapping->private_lock);
897ca6e9 5108
19fe0a8b 5109 /*
3083ee2e
JB
5110 * If tree ref isn't set then we know the ref on this eb is a real ref,
5111 * so just return, this page will likely be freed soon anyway.
19fe0a8b 5112 */
3083ee2e
JB
5113 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5114 spin_unlock(&eb->refs_lock);
5115 return 0;
b9473439 5116 }
19fe0a8b 5117
f7a52a40 5118 return release_extent_buffer(eb);
6af118ce 5119}