/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

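/*
 * Return non-zero once caching of this block group has finished or hit
 * an error; the smp_mb() ensures we observe the most recent ->cached
 * value set by the caching thread.
 */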
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

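/*
 * Mark a range as excluded in both freed_extents trees.  Excluded
 * ranges (e.g. superblock mirrors) are skipped when the free space for
 * a block group is built up by the caching code.
 */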
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
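/*
 * Debug-only helper: punch out every other chunk of free space in the
 * block group so that allocator tests run against a heavily fragmented
 * layout.
 */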
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents
 * that can't be used yet; their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

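/*
 * Worker run from the caching_workers workqueue: walk the extent tree
 * for the block group and feed the gaps between allocated extents into
 * the free space cache via add_new_free_space().
 */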
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;
        bool wakeup = true;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(extent_root, block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                if (wakeup)
                                        caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        if (wakeup)
                                caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(extent_root, block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->key.offset -
                        btrfs_block_group_used(&block_group->item);
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(extent_root, block_group);
        }
#endif

        caching_ctl->progress = (u64)-1;
err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

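/*
 * Start caching a block group.  With the space_cache option we first
 * try to load the free space cache from disk; when that isn't usable
 * (and load_cache_only is not set) the slow path queues caching_thread()
 * to rebuild the cache from the extent tree.
 */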
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but this could happen I think in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
                if (ret == 1 &&
                    btrfs_should_fragment_free_space(fs_info->extent_root,
                                                     cache)) {
                        u64 bytes_used;

                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
                        bytes_used = cache->key.offset -
                                btrfs_block_group_used(&cache->item);
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
                        fragment_free_space(fs_info->extent_root, cache);
                }
#endif
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

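/* Find the space_info matching the block group type bits in @flags. */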
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to look up the reference count and flags of an extent.
 *
 * The head node of a delayed ref stores the sum of all the reference
 * count modifications queued up in the rbtree, and it may also store
 * extent flags to set.  This lets us report the reference count and
 * extent flags as they would be once all of the queued delayed refs
 * have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a
 * block, back refs of this kind record the block's owner tree and the
 * pointer's key, which is enough information to find the block again
 * by b-tree searching.  Full back refs are for pointers in tree blocks
 * not referenced by their owner trees; the location of the tree block
 * is recorded in the back ref itself.  Full back refs are actually
 * generic and could be used everywhere implicit back refs are, but
 * their major shortcoming is overhead: every time a tree block gets
 * COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it, so most tree-related operations only involve implicit
 * back refs.  For a tree block created in an old transaction, the only
 * way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back ref conversion then.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used
 * for the pointers in the block.  Add full back refs for every pointer
 * in the block and increase the lower level extents' reference counts.
 * The original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block and increase the lower level extents' reference
 * counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of the tree
 * leaf.
 *
 * When a file extent is allocated, implicit back refs are used and the
 * fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back ref and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key
 * and level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

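/*
 * A worked example with made-up numbers: a data extent at bytenr
 * 12582912, written by inode 257 at file offset 0 in subvolume 5, gets
 * the implicit back ref key
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * while the same extent referenced through a shared (e.g. snapshotted)
 * leaf at bytenr 30408704 would instead carry the full back ref key
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 *
 * These are exactly the keys built by the lookup helpers below.
 */
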
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

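/*
 * Hash (root, inode objectid, file offset) into the key offset used for
 * implicit data back refs.  The result is stored in on-disk keys, so
 * the construction (note the 31-bit, not 32-bit, shift) must never
 * change.
 */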
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

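/*
 * Find an existing data back ref for the given extent.  Shared refs are
 * keyed directly by the parent block; implicit refs are keyed by a hash
 * that can collide, so we walk forward through all items with the same
 * objectid and type until a full match is found.
 */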
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

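/*
 * Insert a new data back ref or bump the count on an existing one.  On
 * a hash collision for implicit refs, probe forward by incrementing the
 * key offset until a free slot or the matching ref is found.
 */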
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

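/*
 * Drop refs_to_drop references from the data back ref item at the
 * current path position, deleting the item (and setting *last_ref) once
 * the count reaches zero.
 */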
1325 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1326                                            struct btrfs_root *root,
1327                                            struct btrfs_path *path,
1328                                            int refs_to_drop, int *last_ref)
1329 {
1330         struct btrfs_key key;
1331         struct btrfs_extent_data_ref *ref1 = NULL;
1332         struct btrfs_shared_data_ref *ref2 = NULL;
1333         struct extent_buffer *leaf;
1334         u32 num_refs = 0;
1335         int ret = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339
1340         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342                                       struct btrfs_extent_data_ref);
1343                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346                                       struct btrfs_shared_data_ref);
1347                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350                 struct btrfs_extent_ref_v0 *ref0;
1351                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_extent_ref_v0);
1353                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355         } else {
1356                 BUG();
1357         }
1358
1359         BUG_ON(num_refs < refs_to_drop);
1360         num_refs -= refs_to_drop;
1361
1362         if (num_refs == 0) {
1363                 ret = btrfs_del_item(trans, root, path);
1364                 *last_ref = 1;
1365         } else {
1366                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1367                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1368                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1369                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1370 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1371                 else {
1372                         struct btrfs_extent_ref_v0 *ref0;
1373                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1374                                         struct btrfs_extent_ref_v0);
1375                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1376                 }
1377 #endif
1378                 btrfs_mark_buffer_dirty(leaf);
1379         }
1380         return ret;
1381 }
1382
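/*
 * Return the reference count of a data backref, reading it either from
 * the inline ref @iref, or, when @iref is NULL, from the standalone
 * (or v0 compat) backref item the path points to.
 */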
1383 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1384                                           struct btrfs_extent_inline_ref *iref)
1385 {
1386         struct btrfs_key key;
1387         struct extent_buffer *leaf;
1388         struct btrfs_extent_data_ref *ref1;
1389         struct btrfs_shared_data_ref *ref2;
1390         u32 num_refs = 0;
1391
1392         leaf = path->nodes[0];
1393         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1394         if (iref) {
1395                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1396                     BTRFS_EXTENT_DATA_REF_KEY) {
1397                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1398                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1399                 } else {
1400                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1401                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1402                 }
1403         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1404                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1405                                       struct btrfs_extent_data_ref);
1406                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1407         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1408                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1409                                       struct btrfs_shared_data_ref);
1410                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1411 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1412         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1413                 struct btrfs_extent_ref_v0 *ref0;
1414                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1415                                       struct btrfs_extent_ref_v0);
1416                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1417 #endif
1418         } else {
1419                 WARN_ON(1);
1420         }
1421         return num_refs;
1422 }
1423
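/*
 * Look up the standalone backref item of a tree block: a shared block
 * ref keyed by @parent when it is set, otherwise a tree block ref keyed
 * by the owning root.  Returns 0 if found, -ENOENT if not.
 */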
1424 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1425                                           struct btrfs_root *root,
1426                                           struct btrfs_path *path,
1427                                           u64 bytenr, u64 parent,
1428                                           u64 root_objectid)
1429 {
1430         struct btrfs_key key;
1431         int ret;
1432
1433         key.objectid = bytenr;
1434         if (parent) {
1435                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1436                 key.offset = parent;
1437         } else {
1438                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1439                 key.offset = root_objectid;
1440         }
1441
1442         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1443         if (ret > 0)
1444                 ret = -ENOENT;
1445 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1446         if (ret == -ENOENT && parent) {
1447                 btrfs_release_path(path);
1448                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1449                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1450                 if (ret > 0)
1451                         ret = -ENOENT;
1452         }
1453 #endif
1454         return ret;
1455 }
1456
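/*
 * Insert a standalone backref item for a tree block.  The key encodes
 * everything there is to know, so the item itself has zero size.
 */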
1457 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1458                                           struct btrfs_root *root,
1459                                           struct btrfs_path *path,
1460                                           u64 bytenr, u64 parent,
1461                                           u64 root_objectid)
1462 {
1463         struct btrfs_key key;
1464         int ret;
1465
1466         key.objectid = bytenr;
1467         if (parent) {
1468                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1469                 key.offset = parent;
1470         } else {
1471                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1472                 key.offset = root_objectid;
1473         }
1474
1475         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1476         btrfs_release_path(path);
1477         return ret;
1478 }
1479
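/*
 * Map (parent, owner) to the backref key type for the reference:
 *
 *   owner < BTRFS_FIRST_FREE_OBJECTID (tree block):
 *       parent set   -> BTRFS_SHARED_BLOCK_REF_KEY
 *       parent unset -> BTRFS_TREE_BLOCK_REF_KEY
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (data extent):
 *       parent set   -> BTRFS_SHARED_DATA_REF_KEY
 *       parent unset -> BTRFS_EXTENT_DATA_REF_KEY
 */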
1480 static inline int extent_ref_type(u64 parent, u64 owner)
1481 {
1482         int type;
1483         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1484                 if (parent > 0)
1485                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1486                 else
1487                         type = BTRFS_TREE_BLOCK_REF_KEY;
1488         } else {
1489                 if (parent > 0)
1490                         type = BTRFS_SHARED_DATA_REF_KEY;
1491                 else
1492                         type = BTRFS_EXTENT_DATA_REF_KEY;
1493         }
1494         return type;
1495 }
1496
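/*
 * Walk up the path starting at @level and return the first key that
 * follows the current slot.  Returns 0 and fills in @key on success,
 * 1 if the path already points at the last key.
 */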
1497 static int find_next_key(struct btrfs_path *path, int level,
1498                          struct btrfs_key *key)
1500 {
1501         for (; level < BTRFS_MAX_LEVEL; level++) {
1502                 if (!path->nodes[level])
1503                         break;
1504                 if (path->slots[level] + 1 >=
1505                     btrfs_header_nritems(path->nodes[level]))
1506                         continue;
1507                 if (level == 0)
1508                         btrfs_item_key_to_cpu(path->nodes[level], key,
1509                                               path->slots[level] + 1);
1510                 else
1511                         btrfs_node_key_to_cpu(path->nodes[level], key,
1512                                               path->slots[level] + 1);
1513                 return 0;
1514         }
1515         return 1;
1516 }
1517
1518 /*
1519  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1520  * to the address of the inline back ref, and 0 is returned.
1521  *
1522  * If the back ref isn't found, *ref_ret is set to the address where it
1523  * should be inserted, and -ENOENT is returned.
1524  *
1525  * If insert is true and there are too many inline back refs, the path
1526  * points to the extent item, and -EAGAIN is returned.
1527  *
1528  * NOTE: inline back refs are ordered in the same way that back ref
1529  *       items in the tree are ordered.
1530  */
1531 static noinline_for_stack
1532 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1533                                  struct btrfs_root *root,
1534                                  struct btrfs_path *path,
1535                                  struct btrfs_extent_inline_ref **ref_ret,
1536                                  u64 bytenr, u64 num_bytes,
1537                                  u64 parent, u64 root_objectid,
1538                                  u64 owner, u64 offset, int insert)
1539 {
1540         struct btrfs_key key;
1541         struct extent_buffer *leaf;
1542         struct btrfs_extent_item *ei;
1543         struct btrfs_extent_inline_ref *iref;
1544         u64 flags;
1545         u64 item_size;
1546         unsigned long ptr;
1547         unsigned long end;
1548         int extra_size;
1549         int type;
1550         int want;
1551         int ret;
1552         int err = 0;
1553         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1554                                                  SKINNY_METADATA);
1555
1556         key.objectid = bytenr;
1557         key.type = BTRFS_EXTENT_ITEM_KEY;
1558         key.offset = num_bytes;
1559
1560         want = extent_ref_type(parent, owner);
1561         if (insert) {
1562                 extra_size = btrfs_extent_inline_ref_size(want);
1563                 path->keep_locks = 1;
1564         } else
1565                 extra_size = -1;
1566
1567         /*
1568          * For metadata, owner is the level of the block, which is exactly
1569          * what skinny metadata items use as their key offset.
1570          */
1571         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1572                 key.type = BTRFS_METADATA_ITEM_KEY;
1573                 key.offset = owner;
1574         }
1575
1576 again:
1577         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1578         if (ret < 0) {
1579                 err = ret;
1580                 goto out;
1581         }
1582
1583         /*
1584          * We may be a newly converted file system which still has the old fat
1585          * extent entries for metadata, so check whether we have one of those.
1586          */
1587         if (ret > 0 && skinny_metadata) {
1588                 skinny_metadata = false;
1589                 if (path->slots[0]) {
1590                         path->slots[0]--;
1591                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1592                                               path->slots[0]);
1593                         if (key.objectid == bytenr &&
1594                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1595                             key.offset == num_bytes)
1596                                 ret = 0;
1597                 }
1598                 if (ret) {
1599                         key.objectid = bytenr;
1600                         key.type = BTRFS_EXTENT_ITEM_KEY;
1601                         key.offset = num_bytes;
1602                         btrfs_release_path(path);
1603                         goto again;
1604                 }
1605         }
1606
1607         if (ret && !insert) {
1608                 err = -ENOENT;
1609                 goto out;
1610         } else if (WARN_ON(ret)) {
1611                 err = -EIO;
1612                 goto out;
1613         }
1614
1615         leaf = path->nodes[0];
1616         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1617 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1618         if (item_size < sizeof(*ei)) {
1619                 if (!insert) {
1620                         err = -ENOENT;
1621                         goto out;
1622                 }
1623                 ret = convert_extent_item_v0(trans, root, path, owner,
1624                                              extra_size);
1625                 if (ret < 0) {
1626                         err = ret;
1627                         goto out;
1628                 }
1629                 leaf = path->nodes[0];
1630                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1631         }
1632 #endif
1633         BUG_ON(item_size < sizeof(*ei));
1634
1635         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1636         flags = btrfs_extent_flags(leaf, ei);
1637
1638         ptr = (unsigned long)(ei + 1);
1639         end = (unsigned long)ei + item_size;
1640
1641         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1642                 ptr += sizeof(struct btrfs_tree_block_info);
1643                 BUG_ON(ptr > end);
1644         }
1645
1646         err = -ENOENT;
1647         while (1) {
1648                 if (ptr >= end) {
1649                         WARN_ON(ptr > end);
1650                         break;
1651                 }
1652                 iref = (struct btrfs_extent_inline_ref *)ptr;
1653                 type = btrfs_extent_inline_ref_type(leaf, iref);
1654                 if (want < type)
1655                         break;
1656                 if (want > type) {
1657                         ptr += btrfs_extent_inline_ref_size(type);
1658                         continue;
1659                 }
1660
1661                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1662                         struct btrfs_extent_data_ref *dref;
1663                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1664                         if (match_extent_data_ref(leaf, dref, root_objectid,
1665                                                   owner, offset)) {
1666                                 err = 0;
1667                                 break;
1668                         }
1669                         if (hash_extent_data_ref_item(leaf, dref) <
1670                             hash_extent_data_ref(root_objectid, owner, offset))
1671                                 break;
1672                 } else {
1673                         u64 ref_offset;
1674                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1675                         if (parent > 0) {
1676                                 if (parent == ref_offset) {
1677                                         err = 0;
1678                                         break;
1679                                 }
1680                                 if (ref_offset < parent)
1681                                         break;
1682                         } else {
1683                                 if (root_objectid == ref_offset) {
1684                                         err = 0;
1685                                         break;
1686                                 }
1687                                 if (ref_offset < root_objectid)
1688                                         break;
1689                         }
1690                 }
1691                 ptr += btrfs_extent_inline_ref_size(type);
1692         }
1693         if (err == -ENOENT && insert) {
1694                 if (item_size + extra_size >=
1695                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1696                         err = -EAGAIN;
1697                         goto out;
1698                 }
1699                 /*
1700                  * To add a new inline back ref, we have to make sure
1701                  * there is no corresponding back ref item.
1702                  * For simplicity, we just do not add a new inline back
1703                  * ref if there is any kind of item for this block.
1704                  */
1705                 if (find_next_key(path, 0, &key) == 0 &&
1706                     key.objectid == bytenr &&
1707                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1708                         err = -EAGAIN;
1709                         goto out;
1710                 }
1711         }
1712         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1713 out:
1714         if (insert) {
1715                 path->keep_locks = 0;
1716                 btrfs_unlock_up_safe(path, 1);
1717         }
1718         return err;
1719 }
1720
1721 /*
1722  * helper to add a new inline back ref
1723  */
1724 static noinline_for_stack
1725 void setup_inline_extent_backref(struct btrfs_root *root,
1726                                  struct btrfs_path *path,
1727                                  struct btrfs_extent_inline_ref *iref,
1728                                  u64 parent, u64 root_objectid,
1729                                  u64 owner, u64 offset, int refs_to_add,
1730                                  struct btrfs_delayed_extent_op *extent_op)
1731 {
1732         struct extent_buffer *leaf;
1733         struct btrfs_extent_item *ei;
1734         unsigned long ptr;
1735         unsigned long end;
1736         unsigned long item_offset;
1737         u64 refs;
1738         int size;
1739         int type;
1740
1741         leaf = path->nodes[0];
1742         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1743         item_offset = (unsigned long)iref - (unsigned long)ei;
1744
1745         type = extent_ref_type(parent, owner);
1746         size = btrfs_extent_inline_ref_size(type);
1747
1748         btrfs_extend_item(root, path, size);
1749
1750         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1751         refs = btrfs_extent_refs(leaf, ei);
1752         refs += refs_to_add;
1753         btrfs_set_extent_refs(leaf, ei, refs);
1754         if (extent_op)
1755                 __run_delayed_extent_op(extent_op, leaf, ei);
1756
1757         ptr = (unsigned long)ei + item_offset;
1758         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1759         if (ptr < end - size)
1760                 memmove_extent_buffer(leaf, ptr + size, ptr,
1761                                       end - size - ptr);
1762
1763         iref = (struct btrfs_extent_inline_ref *)ptr;
1764         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1765         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1766                 struct btrfs_extent_data_ref *dref;
1767                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1768                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1769                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1770                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1771                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1772         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1773                 struct btrfs_shared_data_ref *sref;
1774                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1775                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1776                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1777         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1778                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1779         } else {
1780                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1781         }
1782         btrfs_mark_buffer_dirty(leaf);
1783 }
1784
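/*
 * Look up a backref, first as an inline ref inside the extent item and,
 * failing that, as a standalone backref item.  In the inline case
 * *ref_ret is set, otherwise the path points at the item (if found).
 */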
1785 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1786                                  struct btrfs_root *root,
1787                                  struct btrfs_path *path,
1788                                  struct btrfs_extent_inline_ref **ref_ret,
1789                                  u64 bytenr, u64 num_bytes, u64 parent,
1790                                  u64 root_objectid, u64 owner, u64 offset)
1791 {
1792         int ret;
1793
1794         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1795                                            bytenr, num_bytes, parent,
1796                                            root_objectid, owner, offset, 0);
1797         if (ret != -ENOENT)
1798                 return ret;
1799
1800         btrfs_release_path(path);
1801         *ref_ret = NULL;
1802
1803         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1804                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1805                                             root_objectid);
1806         } else {
1807                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1808                                              root_objectid, owner, offset);
1809         }
1810         return ret;
1811 }
1812
1813 /*
1814  * helper to update or remove an inline back ref
1815  */
1816 static noinline_for_stack
1817 void update_inline_extent_backref(struct btrfs_root *root,
1818                                   struct btrfs_path *path,
1819                                   struct btrfs_extent_inline_ref *iref,
1820                                   int refs_to_mod,
1821                                   struct btrfs_delayed_extent_op *extent_op,
1822                                   int *last_ref)
1823 {
1824         struct extent_buffer *leaf;
1825         struct btrfs_extent_item *ei;
1826         struct btrfs_extent_data_ref *dref = NULL;
1827         struct btrfs_shared_data_ref *sref = NULL;
1828         unsigned long ptr;
1829         unsigned long end;
1830         u32 item_size;
1831         int size;
1832         int type;
1833         u64 refs;
1834
1835         leaf = path->nodes[0];
1836         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1837         refs = btrfs_extent_refs(leaf, ei);
1838         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1839         refs += refs_to_mod;
1840         btrfs_set_extent_refs(leaf, ei, refs);
1841         if (extent_op)
1842                 __run_delayed_extent_op(extent_op, leaf, ei);
1843
1844         type = btrfs_extent_inline_ref_type(leaf, iref);
1845
1846         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1847                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1848                 refs = btrfs_extent_data_ref_count(leaf, dref);
1849         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1850                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1851                 refs = btrfs_shared_data_ref_count(leaf, sref);
1852         } else {
1853                 refs = 1;
1854                 BUG_ON(refs_to_mod != -1);
1855         }
1856
1857         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1858         refs += refs_to_mod;
1859
1860         if (refs > 0) {
1861                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1862                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1863                 else
1864                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1865         } else {
1866                 *last_ref = 1;
1867                 size =  btrfs_extent_inline_ref_size(type);
1868                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1869                 ptr = (unsigned long)iref;
1870                 end = (unsigned long)ei + item_size;
1871                 if (ptr + size < end)
1872                         memmove_extent_buffer(leaf, ptr, ptr + size,
1873                                               end - ptr - size);
1874                 item_size -= size;
1875                 btrfs_truncate_item(root, path, item_size, 1);
1876         }
1877         btrfs_mark_buffer_dirty(leaf);
1878 }
1879
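/*
 * Try to account @refs_to_add references inline: bump the count of a
 * matching inline ref if one exists, or insert a new one.  Passes up
 * -EAGAIN from the lookup when the ref has to be added as a standalone
 * backref item instead.
 */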
1880 static noinline_for_stack
1881 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1882                                  struct btrfs_root *root,
1883                                  struct btrfs_path *path,
1884                                  u64 bytenr, u64 num_bytes, u64 parent,
1885                                  u64 root_objectid, u64 owner,
1886                                  u64 offset, int refs_to_add,
1887                                  struct btrfs_delayed_extent_op *extent_op)
1888 {
1889         struct btrfs_extent_inline_ref *iref;
1890         int ret;
1891
1892         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1893                                            bytenr, num_bytes, parent,
1894                                            root_objectid, owner, offset, 1);
1895         if (ret == 0) {
1896                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1897                 update_inline_extent_backref(root, path, iref,
1898                                              refs_to_add, extent_op, NULL);
1899         } else if (ret == -ENOENT) {
1900                 setup_inline_extent_backref(root, path, iref, parent,
1901                                             root_objectid, owner, offset,
1902                                             refs_to_add, extent_op);
1903                 ret = 0;
1904         }
1905         return ret;
1906 }
1907
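/*
 * Insert a standalone backref item: a tree block ref for metadata
 * (which carries no count, hence @refs_to_add must be 1) or a data ref
 * for file extents.
 */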
1908 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1909                                  struct btrfs_root *root,
1910                                  struct btrfs_path *path,
1911                                  u64 bytenr, u64 parent, u64 root_objectid,
1912                                  u64 owner, u64 offset, int refs_to_add)
1913 {
1914         int ret;
1915         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1916                 BUG_ON(refs_to_add != 1);
1917                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1918                                             parent, root_objectid);
1919         } else {
1920                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1921                                              parent, root_objectid,
1922                                              owner, offset, refs_to_add);
1923         }
1924         return ret;
1925 }
1926
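/*
 * Drop @refs_to_drop references, either from an inline ref (@iref set),
 * from a standalone data ref item, or by deleting a tree block ref item
 * outright.  *last_ref is set when the last reference is gone.
 */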
1927 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1928                                  struct btrfs_root *root,
1929                                  struct btrfs_path *path,
1930                                  struct btrfs_extent_inline_ref *iref,
1931                                  int refs_to_drop, int is_data, int *last_ref)
1932 {
1933         int ret = 0;
1934
1935         BUG_ON(!is_data && refs_to_drop != 1);
1936         if (iref) {
1937                 update_inline_extent_backref(root, path, iref,
1938                                              -refs_to_drop, NULL, last_ref);
1939         } else if (is_data) {
1940                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1941                                              last_ref);
1942         } else {
1943                 *last_ref = 1;
1944                 ret = btrfs_del_item(trans, root, path);
1945         }
1946         return ret;
1947 }
1948
1949 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
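/*
 * Discard the sector-aligned part of [start, start + len), skipping any
 * subrange that overlaps a superblock mirror on this device.  The number
 * of bytes actually discarded is returned in *discarded_bytes.
 */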
1950 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1951                                u64 *discarded_bytes)
1952 {
1953         int j, ret = 0;
1954         u64 bytes_left, end;
1955         u64 aligned_start = ALIGN(start, 1 << 9);
1956
1957         if (WARN_ON(start != aligned_start)) {
1958                 len -= aligned_start - start;
1959                 len = round_down(len, 1 << 9);
1960                 start = aligned_start;
1961         }
1962
1963         *discarded_bytes = 0;
1964
1965         if (!len)
1966                 return 0;
1967
1968         end = start + len;
1969         bytes_left = len;
1970
1971         /* Skip any superblocks on this device. */
1972         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1973                 u64 sb_start = btrfs_sb_offset(j);
1974                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1975                 u64 size = sb_start - start;
1976
1977                 if (!in_range(sb_start, start, bytes_left) &&
1978                     !in_range(sb_end, start, bytes_left) &&
1979                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1980                         continue;
1981
1982                 /*
1983                  * Superblock spans beginning of range.  Adjust start and
1984                  * try again.
1985                  */
1986                 if (sb_start <= start) {
1987                         start += sb_end - start;
1988                         if (start > end) {
1989                                 bytes_left = 0;
1990                                 break;
1991                         }
1992                         bytes_left = end - start;
1993                         continue;
1994                 }
1995
1996                 if (size) {
1997                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1998                                                    GFP_NOFS, 0);
1999                         if (!ret)
2000                                 *discarded_bytes += size;
2001                         else if (ret != -EOPNOTSUPP)
2002                                 return ret;
2003                 }
2004
2005                 start = sb_end;
2006                 if (start > end) {
2007                         bytes_left = 0;
2008                         break;
2009                 }
2010                 bytes_left = end - start;
2011         }
2012
2013         if (bytes_left) {
2014                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2015                                            GFP_NOFS, 0);
2016                 if (!ret)
2017                         *discarded_bytes += bytes_left;
2018         }
2019         return ret;
2020 }
2021
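/*
 * Map the logical range onto the underlying devices and discard every
 * stripe whose device supports it.  -EOPNOTSUPP is swallowed so callers
 * don't fail on hardware without discard support.
 */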
2022 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2023                          u64 num_bytes, u64 *actual_bytes)
2024 {
2025         int ret;
2026         u64 discarded_bytes = 0;
2027         struct btrfs_bio *bbio = NULL;
2028
2030         /* Tell the block device(s) that the sectors can be discarded */
2031         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2032                               bytenr, &num_bytes, &bbio, 0);
2033         /* Error condition is -ENOMEM */
2034         if (!ret) {
2035                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2036                 int i;
2037
2039                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2040                         u64 bytes;
2041                         if (!stripe->dev->can_discard)
2042                                 continue;
2043
2044                         ret = btrfs_issue_discard(stripe->dev->bdev,
2045                                                   stripe->physical,
2046                                                   stripe->length,
2047                                                   &bytes);
2048                         if (!ret)
2049                                 discarded_bytes += bytes;
2050                         else if (ret != -EOPNOTSUPP)
2051                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2052
2053                         /*
2054                          * Just in case we get back EOPNOTSUPP for some reason,
2055                          * clear the return value so we don't break callers of
2056                          * discard_extent.
2057                          */
2058                         ret = 0;
2059                 }
2060                 btrfs_put_bbio(bbio);
2061         }
2062
2063         if (actual_bytes)
2064                 *actual_bytes = discarded_bytes;
2065
2067         if (ret == -EOPNOTSUPP)
2068                 ret = 0;
2069         return ret;
2070 }
2071
2072 /* Can return -ENOMEM */
2073 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2074                          struct btrfs_root *root,
2075                          u64 bytenr, u64 num_bytes, u64 parent,
2076                          u64 root_objectid, u64 owner, u64 offset,
2077                          int no_quota)
2078 {
2079         int ret;
2080         struct btrfs_fs_info *fs_info = root->fs_info;
2081
2082         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2083                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2084
2085         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2086                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2087                                         num_bytes,
2088                                         parent, root_objectid, (int)owner,
2089                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2090         } else {
2091                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2092                                         num_bytes,
2093                                         parent, root_objectid, owner, offset,
2094                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2095         }
2096         return ret;
2097 }
2098
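/*
 * Add @refs_to_add references to an existing extent item.  An inline
 * ref is preferred; if the extent item has no room left (-EAGAIN), the
 * overall ref count is bumped and a standalone backref item is inserted
 * instead.
 */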
2099 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2100                                   struct btrfs_root *root,
2101                                   struct btrfs_delayed_ref_node *node,
2102                                   u64 parent, u64 root_objectid,
2103                                   u64 owner, u64 offset, int refs_to_add,
2104                                   struct btrfs_delayed_extent_op *extent_op)
2105 {
2106         struct btrfs_fs_info *fs_info = root->fs_info;
2107         struct btrfs_path *path;
2108         struct extent_buffer *leaf;
2109         struct btrfs_extent_item *item;
2110         struct btrfs_key key;
2111         u64 bytenr = node->bytenr;
2112         u64 num_bytes = node->num_bytes;
2113         u64 refs;
2114         int ret;
2115         int no_quota = node->no_quota;
2116
2117         path = btrfs_alloc_path();
2118         if (!path)
2119                 return -ENOMEM;
2120
2121         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2122                 no_quota = 1;
2123
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         /* this will set up the path even if it fails to insert the back ref */
2127         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2128                                            bytenr, num_bytes, parent,
2129                                            root_objectid, owner, offset,
2130                                            refs_to_add, extent_op);
2131         if ((ret < 0 && ret != -EAGAIN) || !ret)
2132                 goto out;
2133
2134         /*
2135          * Ok we had -EAGAIN which means we didn't have space to insert an
2136          * inline extent ref, so just update the reference count and add a
2137          * normal backref.
2138          */
2139         leaf = path->nodes[0];
2140         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2141         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2142         refs = btrfs_extent_refs(leaf, item);
2143         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2144         if (extent_op)
2145                 __run_delayed_extent_op(extent_op, leaf, item);
2146
2147         btrfs_mark_buffer_dirty(leaf);
2148         btrfs_release_path(path);
2149
2150         path->reada = 1;
2151         path->leave_spinning = 1;
2152         /* now insert the actual backref */
2153         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2154                                     path, bytenr, parent, root_objectid,
2155                                     owner, offset, refs_to_add);
2156         if (ret)
2157                 btrfs_abort_transaction(trans, root, ret);
2158 out:
2159         btrfs_free_path(path);
2160         return ret;
2161 }
2162
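/*
 * Run a single delayed ref for a data extent: insert the reserved
 * extent item, add a reference or drop one, depending on the node's
 * action.
 */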
2163 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2164                                 struct btrfs_root *root,
2165                                 struct btrfs_delayed_ref_node *node,
2166                                 struct btrfs_delayed_extent_op *extent_op,
2167                                 int insert_reserved)
2168 {
2169         int ret = 0;
2170         struct btrfs_delayed_data_ref *ref;
2171         struct btrfs_key ins;
2172         u64 parent = 0;
2173         u64 ref_root = 0;
2174         u64 flags = 0;
2175
2176         ins.objectid = node->bytenr;
2177         ins.offset = node->num_bytes;
2178         ins.type = BTRFS_EXTENT_ITEM_KEY;
2179
2180         ref = btrfs_delayed_node_to_data_ref(node);
2181         trace_run_delayed_data_ref(node, ref, node->action);
2182
2183         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2184                 parent = ref->parent;
2185         ref_root = ref->root;
2186
2187         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2188                 if (extent_op)
2189                         flags |= extent_op->flags_to_set;
2190                 ret = alloc_reserved_file_extent(trans, root,
2191                                                  parent, ref_root, flags,
2192                                                  ref->objectid, ref->offset,
2193                                                  &ins, node->ref_mod);
2194         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2195                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2196                                              ref_root, ref->objectid,
2197                                              ref->offset, node->ref_mod,
2198                                              extent_op);
2199         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2200                 ret = __btrfs_free_extent(trans, root, node, parent,
2201                                           ref_root, ref->objectid,
2202                                           ref->offset, node->ref_mod,
2203                                           extent_op);
2204         } else {
2205                 BUG();
2206         }
2207         return ret;
2208 }
2209
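/*
 * Apply the flag and tree block key updates recorded in a delayed
 * extent op to the extent item in place.
 */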
2210 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2211                                     struct extent_buffer *leaf,
2212                                     struct btrfs_extent_item *ei)
2213 {
2214         u64 flags = btrfs_extent_flags(leaf, ei);
2215         if (extent_op->update_flags) {
2216                 flags |= extent_op->flags_to_set;
2217                 btrfs_set_extent_flags(leaf, ei, flags);
2218         }
2219
2220         if (extent_op->update_key) {
2221                 struct btrfs_tree_block_info *bi;
2222                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2223                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2224                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2225         }
2226 }
2227
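/*
 * Find the extent item a delayed extent op refers to, coping with both
 * skinny metadata and old-style extent keys, and apply the recorded
 * updates to it.
 */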
2228 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2229                                  struct btrfs_root *root,
2230                                  struct btrfs_delayed_ref_node *node,
2231                                  struct btrfs_delayed_extent_op *extent_op)
2232 {
2233         struct btrfs_key key;
2234         struct btrfs_path *path;
2235         struct btrfs_extent_item *ei;
2236         struct extent_buffer *leaf;
2237         u32 item_size;
2238         int ret;
2239         int err = 0;
2240         int metadata = !extent_op->is_data;
2241
2242         if (trans->aborted)
2243                 return 0;
2244
2245         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2246                 metadata = 0;
2247
2248         path = btrfs_alloc_path();
2249         if (!path)
2250                 return -ENOMEM;
2251
2252         key.objectid = node->bytenr;
2253
2254         if (metadata) {
2255                 key.type = BTRFS_METADATA_ITEM_KEY;
2256                 key.offset = extent_op->level;
2257         } else {
2258                 key.type = BTRFS_EXTENT_ITEM_KEY;
2259                 key.offset = node->num_bytes;
2260         }
2261
2262 again:
2263         path->reada = 1;
2264         path->leave_spinning = 1;
2265         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2266                                 path, 0, 1);
2267         if (ret < 0) {
2268                 err = ret;
2269                 goto out;
2270         }
2271         if (ret > 0) {
2272                 if (metadata) {
2273                         if (path->slots[0] > 0) {
2274                                 path->slots[0]--;
2275                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2276                                                       path->slots[0]);
2277                                 if (key.objectid == node->bytenr &&
2278                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2279                                     key.offset == node->num_bytes)
2280                                         ret = 0;
2281                         }
2282                         if (ret > 0) {
2283                                 btrfs_release_path(path);
2284                                 metadata = 0;
2285
2286                                 key.objectid = node->bytenr;
2287                                 key.offset = node->num_bytes;
2288                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2289                                 goto again;
2290                         }
2291                 } else {
2292                         err = -EIO;
2293                         goto out;
2294                 }
2295         }
2296
2297         leaf = path->nodes[0];
2298         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2299 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2300         if (item_size < sizeof(*ei)) {
2301                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2302                                              path, (u64)-1, 0);
2303                 if (ret < 0) {
2304                         err = ret;
2305                         goto out;
2306                 }
2307                 leaf = path->nodes[0];
2308                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2309         }
2310 #endif
2311         BUG_ON(item_size < sizeof(*ei));
2312         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2313         __run_delayed_extent_op(extent_op, leaf, ei);
2314
2315         btrfs_mark_buffer_dirty(leaf);
2316 out:
2317         btrfs_free_path(path);
2318         return err;
2319 }
2320
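/*
 * Run a single delayed ref for a tree block: allocate the reserved
 * block, add a reference or drop one, depending on the node's action.
 */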
2321 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2322                                 struct btrfs_root *root,
2323                                 struct btrfs_delayed_ref_node *node,
2324                                 struct btrfs_delayed_extent_op *extent_op,
2325                                 int insert_reserved)
2326 {
2327         int ret = 0;
2328         struct btrfs_delayed_tree_ref *ref;
2329         struct btrfs_key ins;
2330         u64 parent = 0;
2331         u64 ref_root = 0;
2332         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2333                                                  SKINNY_METADATA);
2334
2335         ref = btrfs_delayed_node_to_tree_ref(node);
2336         trace_run_delayed_tree_ref(node, ref, node->action);
2337
2338         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2339                 parent = ref->parent;
2340         ref_root = ref->root;
2341
2342         ins.objectid = node->bytenr;
2343         if (skinny_metadata) {
2344                 ins.offset = ref->level;
2345                 ins.type = BTRFS_METADATA_ITEM_KEY;
2346         } else {
2347                 ins.offset = node->num_bytes;
2348                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2349         }
2350
2351         BUG_ON(node->ref_mod != 1);
2352         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2353                 BUG_ON(!extent_op || !extent_op->update_flags);
2354                 ret = alloc_reserved_tree_block(trans, root,
2355                                                 parent, ref_root,
2356                                                 extent_op->flags_to_set,
2357                                                 &extent_op->key,
2358                                                 ref->level, &ins,
2359                                                 node->no_quota);
2360         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2361                 ret = __btrfs_inc_extent_ref(trans, root, node,
2362                                              parent, ref_root,
2363                                              ref->level, 0, 1,
2364                                              extent_op);
2365         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2366                 ret = __btrfs_free_extent(trans, root, node,
2367                                           parent, ref_root,
2368                                           ref->level, 0, 1, extent_op);
2369         } else {
2370                 BUG();
2371         }
2372         return ret;
2373 }
2374
2375 /* helper function to actually process a single delayed ref entry */
2376 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2377                                struct btrfs_root *root,
2378                                struct btrfs_delayed_ref_node *node,
2379                                struct btrfs_delayed_extent_op *extent_op,
2380                                int insert_reserved)
2381 {
2382         int ret = 0;
2383
2384         if (trans->aborted) {
2385                 if (insert_reserved)
2386                         btrfs_pin_extent(root, node->bytenr,
2387                                          node->num_bytes, 1);
2388                 return 0;
2389         }
2390
2391         if (btrfs_delayed_ref_is_head(node)) {
2392                 struct btrfs_delayed_ref_head *head;
2393                 /*
2394                  * We've hit the end of the chain and we were supposed
2395                  * to insert this extent into the tree.  But it got
2396                  * deleted before we ever needed to insert it, so all
2397                  * we have to do is clean up the accounting.
2398                  */
2399                 BUG_ON(extent_op);
2400                 head = btrfs_delayed_node_to_head(node);
2401                 trace_run_delayed_ref_head(node, head, node->action);
2402
2403                 if (insert_reserved) {
2404                         btrfs_pin_extent(root, node->bytenr,
2405                                          node->num_bytes, 1);
2406                         if (head->is_data) {
2407                                 ret = btrfs_del_csums(trans, root,
2408                                                       node->bytenr,
2409                                                       node->num_bytes);
2410                         }
2411                 }
2412
2413                 /* Also free its reserved qgroup space */
2414                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2415                                               head->qgroup_ref_root,
2416                                               head->qgroup_reserved);
2417                 return ret;
2418         }
2419
2420         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2421             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2422                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2423                                            insert_reserved);
2424         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2425                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2426                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2427                                            insert_reserved);
2428         else
2429                 BUG();
2430         return ret;
2431 }
2432
2433 static inline struct btrfs_delayed_ref_node *
2434 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2435 {
2436         struct btrfs_delayed_ref_node *ref;
2437
2438         if (list_empty(&head->ref_list))
2439                 return NULL;
2440
2441         /*
2442          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2443          * This is to prevent a ref count from going down to zero, which deletes
2444          * the extent item from the extent tree, when there still are references
2445          * to add, which would fail because they would not find the extent item.
2446          */
2447         list_for_each_entry(ref, &head->ref_list, list) {
2448                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2449                         return ref;
2450         }
2451
2452         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2453                           list);
2454 }
2455
2456 /*
2457  * Returns 0 on success or if called with an already aborted transaction.
2458  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2459  */
2460 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2461                                              struct btrfs_root *root,
2462                                              unsigned long nr)
2463 {
2464         struct btrfs_delayed_ref_root *delayed_refs;
2465         struct btrfs_delayed_ref_node *ref;
2466         struct btrfs_delayed_ref_head *locked_ref = NULL;
2467         struct btrfs_delayed_extent_op *extent_op;
2468         struct btrfs_fs_info *fs_info = root->fs_info;
2469         ktime_t start = ktime_get();
2470         int ret;
2471         unsigned long count = 0;
2472         unsigned long actual_count = 0;
2473         int must_insert_reserved = 0;
2474
2475         delayed_refs = &trans->transaction->delayed_refs;
2476         while (1) {
2477                 if (!locked_ref) {
2478                         if (count >= nr)
2479                                 break;
2480
2481                         spin_lock(&delayed_refs->lock);
2482                         locked_ref = btrfs_select_ref_head(trans);
2483                         if (!locked_ref) {
2484                                 spin_unlock(&delayed_refs->lock);
2485                                 break;
2486                         }
2487
2488                         /* grab the lock that says we are going to process
2489                          * all the refs for this head */
2490                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2491                         spin_unlock(&delayed_refs->lock);
2492                         /*
2493                          * we may have dropped the spin lock to get the head
2494                          * mutex lock, and that might have given someone else
2495                          * time to free the head.  If that's true, it has been
2496                          * removed from our list and we can move on.
2497                          */
2498                         if (ret == -EAGAIN) {
2499                                 locked_ref = NULL;
2500                                 count++;
2501                                 continue;
2502                         }
2503                 }
2504
2505                 spin_lock(&locked_ref->lock);
2506
2507                 /*
2508                  * locked_ref is the head node, so we have to go one
2509                  * node back for any delayed ref updates
2510                  */
2511                 ref = select_delayed_ref(locked_ref);
2512
2513                 if (ref && ref->seq &&
2514                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2515                         spin_unlock(&locked_ref->lock);
2516                         btrfs_delayed_ref_unlock(locked_ref);
2517                         spin_lock(&delayed_refs->lock);
2518                         locked_ref->processing = 0;
2519                         delayed_refs->num_heads_ready++;
2520                         spin_unlock(&delayed_refs->lock);
2521                         locked_ref = NULL;
2522                         cond_resched();
2523                         count++;
2524                         continue;
2525                 }
2526
2527                 /*
2528                  * record the must insert reserved flag before we
2529                  * drop the spin lock.
2530                  */
2531                 must_insert_reserved = locked_ref->must_insert_reserved;
2532                 locked_ref->must_insert_reserved = 0;
2533
2534                 extent_op = locked_ref->extent_op;
2535                 locked_ref->extent_op = NULL;
2536
2537                 if (!ref) {
2538
2540                         /* All delayed refs have been processed, go ahead
2541                          * and send the head node to run_one_delayed_ref,
2542                          * so that any accounting fixes can happen
2543                          */
2544                         ref = &locked_ref->node;
2545
2546                         if (extent_op && must_insert_reserved) {
2547                                 btrfs_free_delayed_extent_op(extent_op);
2548                                 extent_op = NULL;
2549                         }
2550
2551                         if (extent_op) {
2552                                 spin_unlock(&locked_ref->lock);
2553                                 ret = run_delayed_extent_op(trans, root,
2554                                                             ref, extent_op);
2555                                 btrfs_free_delayed_extent_op(extent_op);
2556
2557                                 if (ret) {
2558                                         /*
2559                                          * Need to reset must_insert_reserved if
2560                                          * there was an error so the abort stuff
2561                                          * can clean up the reserved space
2562                                          * properly.
2563                                          */
2564                                         if (must_insert_reserved)
2565                                                 locked_ref->must_insert_reserved = 1;
2566                                         locked_ref->processing = 0;
2567                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2568                                         btrfs_delayed_ref_unlock(locked_ref);
2569                                         return ret;
2570                                 }
2571                                 continue;
2572                         }
2573
2574                         /*
2575                          * Need to drop our head ref lock and re-acquire the
2576                          * delayed ref lock and then re-check to make sure
2577                          * nobody got added.
2578                          */
2579                         spin_unlock(&locked_ref->lock);
2580                         spin_lock(&delayed_refs->lock);
2581                         spin_lock(&locked_ref->lock);
2582                         if (!list_empty(&locked_ref->ref_list) ||
2583                             locked_ref->extent_op) {
2584                                 spin_unlock(&locked_ref->lock);
2585                                 spin_unlock(&delayed_refs->lock);
2586                                 continue;
2587                         }
2588                         ref->in_tree = 0;
2589                         delayed_refs->num_heads--;
2590                         rb_erase(&locked_ref->href_node,
2591                                  &delayed_refs->href_root);
2592                         spin_unlock(&delayed_refs->lock);
2593                 } else {
2594                         actual_count++;
2595                         ref->in_tree = 0;
2596                         list_del(&ref->list);
2597                 }
2598                 atomic_dec(&delayed_refs->num_entries);
2599
2600                 if (!btrfs_delayed_ref_is_head(ref)) {
2601                         /*
2602                          * when we play the delayed ref, also correct the
2603                          * ref_mod on head
2604                          */
2605                         switch (ref->action) {
2606                         case BTRFS_ADD_DELAYED_REF:
2607                         case BTRFS_ADD_DELAYED_EXTENT:
2608                                 locked_ref->node.ref_mod -= ref->ref_mod;
2609                                 break;
2610                         case BTRFS_DROP_DELAYED_REF:
2611                                 locked_ref->node.ref_mod += ref->ref_mod;
2612                                 break;
2613                         default:
2614                                 WARN_ON(1);
2615                         }
2616                 }
2617                 spin_unlock(&locked_ref->lock);
2618
2619                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2620                                           must_insert_reserved);
2621
2622                 btrfs_free_delayed_extent_op(extent_op);
2623                 if (ret) {
2624                         locked_ref->processing = 0;
2625                         btrfs_delayed_ref_unlock(locked_ref);
2626                         btrfs_put_delayed_ref(ref);
2627                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2628                         return ret;
2629                 }
2630
2631                 /*
2632                  * If this node is a head, that means all the refs in this head
2633                  * have been dealt with, and we will pick the next head to deal
2634                  * with, so we must unlock the head and drop it from the cluster
2635                  * list before we release it.
2636                  */
2637                 if (btrfs_delayed_ref_is_head(ref)) {
2638                         if (locked_ref->is_data &&
2639                             locked_ref->total_ref_mod < 0) {
2640                                 spin_lock(&delayed_refs->lock);
2641                                 delayed_refs->pending_csums -= ref->num_bytes;
2642                                 spin_unlock(&delayed_refs->lock);
2643                         }
2644                         btrfs_delayed_ref_unlock(locked_ref);
2645                         locked_ref = NULL;
2646                 }
2647                 btrfs_put_delayed_ref(ref);
2648                 count++;
2649                 cond_resched();
2650         }
2651
2652         /*
2653          * We don't want to include ref heads since we can have empty ref heads
2654                          * and those would drastically skew our runtime down, since for them we
2655                          * only do accounting, no actual extent tree updates.
2656          */
2657         if (actual_count > 0) {
2658                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2659                 u64 avg;
2660
2661                 /*
2662                  * We weigh the current average higher than our current runtime
2663                  * to avoid large swings in the average.
2664                  */
2665                 spin_lock(&delayed_refs->lock);
2666                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2667                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2668                 spin_unlock(&delayed_refs->lock);
2669         }
2670         return 0;
2671 }
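
/*
 * A worked example of the moving average above, with illustrative
 * numbers: if the stored avg_delayed_ref_runtime is 100000ns and one
 * batch measures runtime = 20000ns, then avg = 100000 * 3 + 20000 =
 * 320000 and the new stored value is 320000 >> 2 = 80000ns, i.e. a
 * 3/4-old, 1/4-new exponential moving average.
 */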
2672
2673 #ifdef SCRAMBLE_DELAYED_REFS
2674 /*
2675  * Normally delayed refs get processed in ascending bytenr order. This
2676  * correlates in most cases to the order added. To expose dependencies on this
2677  * order, we start to process the tree in the middle instead of the beginning.
2678  */
2679 static u64 find_middle(struct rb_root *root)
2680 {
2681         struct rb_node *n = root->rb_node;
2682         struct btrfs_delayed_ref_node *entry;
2683         int alt = 1;
2684         u64 middle;
2685         u64 first = 0, last = 0;
2686
2687         n = rb_first(root);
2688         if (n) {
2689                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2690                 first = entry->bytenr;
2691         }
2692         n = rb_last(root);
2693         if (n) {
2694                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2695                 last = entry->bytenr;
2696         }
2697         n = root->rb_node;
2698
2699         while (n) {
2700                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2701                 WARN_ON(!entry->in_tree);
2702
2703                 middle = entry->bytenr;
2704
2705                 if (alt)
2706                         n = n->rb_left;
2707                 else
2708                         n = n->rb_right;
2709
2710                 alt = 1 - alt;
2711         }
2712         return middle;
2713 }
2714 #endif
2715
2716 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2717 {
2718         u64 num_bytes;
2719
2720         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2721                              sizeof(struct btrfs_extent_inline_ref));
2722         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2723                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2724
2725         /*
2726          * We don't ever fill up leaves all the way, so the caller multiplies by
2727          * 2 to be closer to what we're really going to want to use.
2728          */
2729         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2730 }
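
/*
 * Rough example, assuming 16K nodes and the skinny-metadata feature
 * (so no btrfs_tree_block_info): each head is estimated at
 * sizeof(btrfs_extent_item) + sizeof(btrfs_extent_inline_ref), about
 * 24 + 9 = 33 bytes, so on the order of 490 heads fit in one ~16K
 * leaf before the caller's doubling is applied.
 */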
2731
2732 /*
2733  * Takes the number of bytes to be checksummed and figures out how many leaves it
2734  * would require to store the csums for that many bytes.
2735  */
2736 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2737 {
2738         u64 csum_size;
2739         u64 num_csums_per_leaf;
2740         u64 num_csums;
2741
2742         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2743         num_csums_per_leaf = div64_u64(csum_size,
2744                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2745         num_csums = div64_u64(csum_bytes, root->sectorsize);
2746         num_csums += num_csums_per_leaf - 1;
2747         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2748         return num_csums;
2749 }
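
/*
 * Example, assuming 4K sectors, 4-byte crc32c csums and ~16K of leaf
 * data: csum_size is roughly 16K minus one item header, giving about
 * 4064 csums per leaf; 1GiB of data needs 262144 csums, which rounds
 * up to 65 leaves.
 */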
2750
2751 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2752                                        struct btrfs_root *root)
2753 {
2754         struct btrfs_block_rsv *global_rsv;
2755         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2756         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2757         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2758         u64 num_bytes, num_dirty_bgs_bytes;
2759         int ret = 0;
2760
2761         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2762         num_heads = heads_to_leaves(root, num_heads);
2763         if (num_heads > 1)
2764                 num_bytes += (num_heads - 1) * root->nodesize;
2765         num_bytes <<= 1;
2766         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2767         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2768                                                              num_dirty_bgs);
2769         global_rsv = &root->fs_info->global_block_rsv;
2770
2771         /*
2772          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2773          * wiggle room since running delayed refs can create more delayed refs.
2774          */
2775         if (global_rsv->space_info->full) {
2776                 num_dirty_bgs_bytes <<= 1;
2777                 num_bytes <<= 1;
2778         }
2779
2780         spin_lock(&global_rsv->lock);
2781         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2782                 ret = 1;
2783         spin_unlock(&global_rsv->lock);
2784         return ret;
2785 }
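
/*
 * For scale (illustrative numbers): on a 16K-nodesize filesystem,
 * btrfs_calc_trans_metadata_size(root, 1) reserves 2 * BTRFS_MAX_LEVEL
 * * 16K = 256K for a single tree operation, and the shifts above then
 * double the totals for slack, since running delayed refs can itself
 * queue more delayed refs.
 */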
2786
2787 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2788                                        struct btrfs_root *root)
2789 {
2790         struct btrfs_fs_info *fs_info = root->fs_info;
2791         u64 num_entries =
2792                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2793         u64 avg_runtime;
2794         u64 val;
2795
2796         smp_mb();
2797         avg_runtime = fs_info->avg_delayed_ref_runtime;
2798         val = num_entries * avg_runtime;
2799         if (val >= NSEC_PER_SEC)
2800                 return 1;
2801         if (val >= NSEC_PER_SEC / 2)
2802                 return 2;
2803
2804         return btrfs_check_space_for_delayed_refs(trans, root);
2805 }
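
/*
 * Example: with an average cost of 50000ns per entry, 20000 pending
 * entries makes val one full second, so we return 1 (throttle hard);
 * 10000 entries is half a second and returns 2; anything cheaper
 * falls through to the global reserve check.
 */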
2806
2807 struct async_delayed_refs {
2808         struct btrfs_root *root;
2809         int count;
2810         int error;
2811         int sync;
2812         struct completion wait;
2813         struct btrfs_work work;
2814 };
2815
2816 static void delayed_ref_async_start(struct btrfs_work *work)
2817 {
2818         struct async_delayed_refs *async;
2819         struct btrfs_trans_handle *trans;
2820         int ret;
2821
2822         async = container_of(work, struct async_delayed_refs, work);
2823
2824         trans = btrfs_join_transaction(async->root);
2825         if (IS_ERR(trans)) {
2826                 async->error = PTR_ERR(trans);
2827                 goto done;
2828         }
2829
2830         /*
2831          * trans->sync means that when we call end_transaction, we won't
2832          * wait on delayed refs
2833          */
2834         trans->sync = true;
2835         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2836         if (ret)
2837                 async->error = ret;
2838
2839         ret = btrfs_end_transaction(trans, async->root);
2840         if (ret && !async->error)
2841                 async->error = ret;
2842 done:
2843         if (async->sync)
2844                 complete(&async->wait);
2845         else
2846                 kfree(async);
2847 }
2848
2849 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2850                                  unsigned long count, int wait)
2851 {
2852         struct async_delayed_refs *async;
2853         int ret;
2854
2855         async = kmalloc(sizeof(*async), GFP_NOFS);
2856         if (!async)
2857                 return -ENOMEM;
2858
2859         async->root = root->fs_info->tree_root;
2860         async->count = count;
2861         async->error = 0;
2862         if (wait)
2863                 async->sync = 1;
2864         else
2865                 async->sync = 0;
2866         init_completion(&async->wait);
2867
2868         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2869                         delayed_ref_async_start, NULL, NULL);
2870
2871         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2872
2873         if (wait) {
2874                 wait_for_completion(&async->wait);
2875                 ret = async->error;
2876                 kfree(async);
2877                 return ret;
2878         }
2879         return 0;
2880 }
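
/*
 * Usage note: a caller passing wait == 0 fires and forgets, and the
 * worker frees the async struct itself; a caller passing wait == 1
 * sleeps on async->wait and receives the worker's error code as the
 * return value here.
 */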
2881
2882 /*
2883  * this starts processing the delayed reference count updates and
2884  * extent insertions we have queued up so far.  count can be
2885  * 0, which means to process everything in the tree at the start
2886  * of the run (but not newly added entries), or it can be some target
2887  * number you'd like to process.
2888  *
2889  * Returns 0 on success or if called with an aborted transaction
2890  * Returns <0 on error and aborts the transaction
2891  */
2892 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2893                            struct btrfs_root *root, unsigned long count)
2894 {
2895         struct rb_node *node;
2896         struct btrfs_delayed_ref_root *delayed_refs;
2897         struct btrfs_delayed_ref_head *head;
2898         int ret;
2899         int run_all = count == (unsigned long)-1;
2900         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2901
2902         /* We'll clean this up in btrfs_cleanup_transaction */
2903         if (trans->aborted)
2904                 return 0;
2905
2906         if (root == root->fs_info->extent_root)
2907                 root = root->fs_info->tree_root;
2908
2909         delayed_refs = &trans->transaction->delayed_refs;
2910         if (count == 0)
2911                 count = atomic_read(&delayed_refs->num_entries) * 2;
2912
2913 again:
2914 #ifdef SCRAMBLE_DELAYED_REFS
2915         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2916 #endif
2917         trans->can_flush_pending_bgs = false;
2918         ret = __btrfs_run_delayed_refs(trans, root, count);
2919         if (ret < 0) {
2920                 btrfs_abort_transaction(trans, root, ret);
2921                 return ret;
2922         }
2923
2924         if (run_all) {
2925                 if (!list_empty(&trans->new_bgs))
2926                         btrfs_create_pending_block_groups(trans, root);
2927
2928                 spin_lock(&delayed_refs->lock);
2929                 node = rb_first(&delayed_refs->href_root);
2930                 if (!node) {
2931                         spin_unlock(&delayed_refs->lock);
2932                         goto out;
2933                 }
2934                 count = (unsigned long)-1;
2935
2936                 while (node) {
2937                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2938                                         href_node);
2939                         if (btrfs_delayed_ref_is_head(&head->node)) {
2940                                 struct btrfs_delayed_ref_node *ref;
2941
2942                                 ref = &head->node;
2943                                 atomic_inc(&ref->refs);
2944
2945                                 spin_unlock(&delayed_refs->lock);
2946                                 /*
2947                                  * Mutex was contended, block until it's
2948                                  * released and try again
2949                                  */
2950                                 mutex_lock(&head->mutex);
2951                                 mutex_unlock(&head->mutex);
2952
2953                                 btrfs_put_delayed_ref(ref);
2954                                 cond_resched();
2955                                 goto again;
2956                         } else {
2957                                 WARN_ON(1);
2958                         }
2959                         node = rb_next(node);
2960                 }
2961                 spin_unlock(&delayed_refs->lock);
2962                 cond_resched();
2963                 goto again;
2964         }
2965 out:
2966         assert_qgroups_uptodate(trans);
2967         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2968         return 0;
2969 }
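
/*
 * Callers typically pass a small batch count for throttling, 0 to run
 * roughly everything queued at entry (snapshotted above as twice
 * num_entries), or (unsigned long)-1 to keep looping until the ref
 * tree is completely drained, as the run_all path does.
 */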
2970
2971 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2972                                 struct btrfs_root *root,
2973                                 u64 bytenr, u64 num_bytes, u64 flags,
2974                                 int level, int is_data)
2975 {
2976         struct btrfs_delayed_extent_op *extent_op;
2977         int ret;
2978
2979         extent_op = btrfs_alloc_delayed_extent_op();
2980         if (!extent_op)
2981                 return -ENOMEM;
2982
2983         extent_op->flags_to_set = flags;
2984         extent_op->update_flags = 1;
2985         extent_op->update_key = 0;
2986         extent_op->is_data = is_data ? 1 : 0;
2987         extent_op->level = level;
2988
2989         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2990                                           num_bytes, extent_op);
2991         if (ret)
2992                 btrfs_free_delayed_extent_op(extent_op);
2993         return ret;
2994 }
2995
2996 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2997                                       struct btrfs_root *root,
2998                                       struct btrfs_path *path,
2999                                       u64 objectid, u64 offset, u64 bytenr)
3000 {
3001         struct btrfs_delayed_ref_head *head;
3002         struct btrfs_delayed_ref_node *ref;
3003         struct btrfs_delayed_data_ref *data_ref;
3004         struct btrfs_delayed_ref_root *delayed_refs;
3005         int ret = 0;
3006
3007         delayed_refs = &trans->transaction->delayed_refs;
3008         spin_lock(&delayed_refs->lock);
3009         head = btrfs_find_delayed_ref_head(trans, bytenr);
3010         if (!head) {
3011                 spin_unlock(&delayed_refs->lock);
3012                 return 0;
3013         }
3014
3015         if (!mutex_trylock(&head->mutex)) {
3016                 atomic_inc(&head->node.refs);
3017                 spin_unlock(&delayed_refs->lock);
3018
3019                 btrfs_release_path(path);
3020
3021                 /*
3022                  * Mutex was contended, block until it's released and let
3023                  * caller try again
3024                  */
3025                 mutex_lock(&head->mutex);
3026                 mutex_unlock(&head->mutex);
3027                 btrfs_put_delayed_ref(&head->node);
3028                 return -EAGAIN;
3029         }
3030         spin_unlock(&delayed_refs->lock);
3031
3032         spin_lock(&head->lock);
3033         list_for_each_entry(ref, &head->ref_list, list) {
3034                 /* If it's a shared ref we know a cross reference exists */
3035                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3036                         ret = 1;
3037                         break;
3038                 }
3039
3040                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3041
3042                 /*
3043                  * If our ref doesn't match the one we're currently looking at
3044                  * then we have a cross reference.
3045                  */
3046                 if (data_ref->root != root->root_key.objectid ||
3047                     data_ref->objectid != objectid ||
3048                     data_ref->offset != offset) {
3049                         ret = 1;
3050                         break;
3051                 }
3052         }
3053         spin_unlock(&head->lock);
3054         mutex_unlock(&head->mutex);
3055         return ret;
3056 }
3057
3058 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3059                                         struct btrfs_root *root,
3060                                         struct btrfs_path *path,
3061                                         u64 objectid, u64 offset, u64 bytenr)
3062 {
3063         struct btrfs_root *extent_root = root->fs_info->extent_root;
3064         struct extent_buffer *leaf;
3065         struct btrfs_extent_data_ref *ref;
3066         struct btrfs_extent_inline_ref *iref;
3067         struct btrfs_extent_item *ei;
3068         struct btrfs_key key;
3069         u32 item_size;
3070         int ret;
3071
3072         key.objectid = bytenr;
3073         key.offset = (u64)-1;
3074         key.type = BTRFS_EXTENT_ITEM_KEY;
3075
3076         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3077         if (ret < 0)
3078                 goto out;
3079         BUG_ON(ret == 0); /* Corruption */
3080
3081         ret = -ENOENT;
3082         if (path->slots[0] == 0)
3083                 goto out;
3084
3085         path->slots[0]--;
3086         leaf = path->nodes[0];
3087         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3088
3089         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3090                 goto out;
3091
3092         ret = 1;
3093         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3094 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3095         if (item_size < sizeof(*ei)) {
3096                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3097                 goto out;
3098         }
3099 #endif
3100         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3101
3102         if (item_size != sizeof(*ei) +
3103             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3104                 goto out;
3105
3106         if (btrfs_extent_generation(leaf, ei) <=
3107             btrfs_root_last_snapshot(&root->root_item))
3108                 goto out;
3109
3110         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3111         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3112             BTRFS_EXTENT_DATA_REF_KEY)
3113                 goto out;
3114
3115         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3116         if (btrfs_extent_refs(leaf, ei) !=
3117             btrfs_extent_data_ref_count(leaf, ref) ||
3118             btrfs_extent_data_ref_root(leaf, ref) !=
3119             root->root_key.objectid ||
3120             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3121             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3122                 goto out;
3123
3124         ret = 0;
3125 out:
3126         return ret;
3127 }
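
/*
 * Summing up the fast path above: to prove exclusivity the extent must
 * carry exactly one inline data ref, be newer than the root's last
 * snapshot, and that ref must match our root, objectid and offset with
 * a matching ref count; otherwise we return -ENOENT (no extent item)
 * or 1 to conservatively assume a cross reference.
 */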
3128
3129 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3130                           struct btrfs_root *root,
3131                           u64 objectid, u64 offset, u64 bytenr)
3132 {
3133         struct btrfs_path *path;
3134         int ret;
3135         int ret2;
3136
3137         path = btrfs_alloc_path();
3138         if (!path)
3139                 return -ENOMEM;
3140
3141         do {
3142                 ret = check_committed_ref(trans, root, path, objectid,
3143                                           offset, bytenr);
3144                 if (ret && ret != -ENOENT)
3145                         goto out;
3146
3147                 ret2 = check_delayed_ref(trans, root, path, objectid,
3148                                          offset, bytenr);
3149         } while (ret2 == -EAGAIN);
3150
3151         if (ret2 && ret2 != -ENOENT) {
3152                 ret = ret2;
3153                 goto out;
3154         }
3155
3156         if (ret != -ENOENT || ret2 != -ENOENT)
3157                 ret = 0;
3158 out:
3159         btrfs_free_path(path);
3160         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3161                 WARN_ON(ret > 0);
3162         return ret;
3163 }
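
/*
 * A zero return here means neither the committed extent tree nor the
 * delayed ref tree showed another owner, so the extent can be treated
 * as exclusively referenced; callers such as the nocow path treat any
 * non-zero return as a reason to fall back to ordinary COW.
 */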
3164
3165 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3166                            struct btrfs_root *root,
3167                            struct extent_buffer *buf,
3168                            int full_backref, int inc)
3169 {
3170         u64 bytenr;
3171         u64 num_bytes;
3172         u64 parent;
3173         u64 ref_root;
3174         u32 nritems;
3175         struct btrfs_key key;
3176         struct btrfs_file_extent_item *fi;
3177         int i;
3178         int level;
3179         int ret = 0;
3180         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3181                             u64, u64, u64, u64, u64, u64, int);
3182
3183
3184         if (btrfs_test_is_dummy_root(root))
3185                 return 0;
3186
3187         ref_root = btrfs_header_owner(buf);
3188         nritems = btrfs_header_nritems(buf);
3189         level = btrfs_header_level(buf);
3190
3191         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3192                 return 0;
3193
3194         if (inc)
3195                 process_func = btrfs_inc_extent_ref;
3196         else
3197                 process_func = btrfs_free_extent;
3198
3199         if (full_backref)
3200                 parent = buf->start;
3201         else
3202                 parent = 0;
3203
3204         for (i = 0; i < nritems; i++) {
3205                 if (level == 0) {
3206                         btrfs_item_key_to_cpu(buf, &key, i);
3207                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3208                                 continue;
3209                         fi = btrfs_item_ptr(buf, i,
3210                                             struct btrfs_file_extent_item);
3211                         if (btrfs_file_extent_type(buf, fi) ==
3212                             BTRFS_FILE_EXTENT_INLINE)
3213                                 continue;
3214                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3215                         if (bytenr == 0)
3216                                 continue;
3217
3218                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3219                         key.offset -= btrfs_file_extent_offset(buf, fi);
3220                         ret = process_func(trans, root, bytenr, num_bytes,
3221                                            parent, ref_root, key.objectid,
3222                                            key.offset, 1);
3223                         if (ret)
3224                                 goto fail;
3225                 } else {
3226                         bytenr = btrfs_node_blockptr(buf, i);
3227                         num_bytes = root->nodesize;
3228                         ret = process_func(trans, root, bytenr, num_bytes,
3229                                            parent, ref_root, level - 1, 0,
3230                                            1);
3231                         if (ret)
3232                                 goto fail;
3233                 }
3234         }
3235         return 0;
3236 fail:
3237         return ret;
3238 }
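
/*
 * In short: for a leaf we add or drop one data ref per real file
 * extent (inline extents and holes with disk_bytenr == 0 are skipped),
 * and for a node one tree ref per child pointer; inc picks the
 * direction and full_backref decides whether the refs name this buffer
 * as parent or fall back to the owning root.
 */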
3239
3240 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3241                   struct extent_buffer *buf, int full_backref)
3242 {
3243         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3244 }
3245
3246 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3247                   struct extent_buffer *buf, int full_backref)
3248 {
3249         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3250 }
3251
3252 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3253                                  struct btrfs_root *root,
3254                                  struct btrfs_path *path,
3255                                  struct btrfs_block_group_cache *cache)
3256 {
3257         int ret;
3258         struct btrfs_root *extent_root = root->fs_info->extent_root;
3259         unsigned long bi;
3260         struct extent_buffer *leaf;
3261
3262         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3263         if (ret) {
3264                 if (ret > 0)
3265                         ret = -ENOENT;
3266                 goto fail;
3267         }
3268
3269         leaf = path->nodes[0];
3270         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3271         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3272         btrfs_mark_buffer_dirty(leaf);
3273 fail:
3274         btrfs_release_path(path);
3275         return ret;
3276
3277 }
3278
3279 static struct btrfs_block_group_cache *
3280 next_block_group(struct btrfs_root *root,
3281                  struct btrfs_block_group_cache *cache)
3282 {
3283         struct rb_node *node;
3284
3285         spin_lock(&root->fs_info->block_group_cache_lock);
3286
3287         /* If our block group was removed, we need a full search. */
3288         if (RB_EMPTY_NODE(&cache->cache_node)) {
3289                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3290
3291                 spin_unlock(&root->fs_info->block_group_cache_lock);
3292                 btrfs_put_block_group(cache);
3293                 cache = btrfs_lookup_first_block_group(root->fs_info,
3294                                                        next_bytenr);
3295                 return cache;
3296         }
3297         node = rb_next(&cache->cache_node);
3298         btrfs_put_block_group(cache);
3299         if (node) {
3300                 cache = rb_entry(node, struct btrfs_block_group_cache,
3301                                  cache_node);
3302                 btrfs_get_block_group(cache);
3303         } else
3304                 cache = NULL;
3305         spin_unlock(&root->fs_info->block_group_cache_lock);
3306         return cache;
3307 }
3308
3309 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3310                             struct btrfs_trans_handle *trans,
3311                             struct btrfs_path *path)
3312 {
3313         struct btrfs_root *root = block_group->fs_info->tree_root;
3314         struct inode *inode = NULL;
3315         u64 alloc_hint = 0;
3316         int dcs = BTRFS_DC_ERROR;
3317         u64 num_pages = 0;
3318         int retries = 0;
3319         int ret = 0;
3320
3321         /*
3322          * If this block group is smaller than 100 megs, don't bother caching the
3323          * block group.
3324          */
3325         if (block_group->key.offset < (100 * 1024 * 1024)) {
3326                 spin_lock(&block_group->lock);
3327                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3328                 spin_unlock(&block_group->lock);
3329                 return 0;
3330         }
3331
3332         if (trans->aborted)
3333                 return 0;
3334 again:
3335         inode = lookup_free_space_inode(root, block_group, path);
3336         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3337                 ret = PTR_ERR(inode);
3338                 btrfs_release_path(path);
3339                 goto out;
3340         }
3341
3342         if (IS_ERR(inode)) {
3343                 BUG_ON(retries);
3344                 retries++;
3345
3346                 if (block_group->ro)
3347                         goto out_free;
3348
3349                 ret = create_free_space_inode(root, trans, block_group, path);
3350                 if (ret)
3351                         goto out_free;
3352                 goto again;
3353         }
3354
3355         /* We've already set up this transaction, go ahead and exit */
3356         if (block_group->cache_generation == trans->transid &&
3357             i_size_read(inode)) {
3358                 dcs = BTRFS_DC_SETUP;
3359                 goto out_put;
3360         }
3361
3362         /*
3363          * We want to set the generation to 0 so that if anything goes wrong
3364          * from here on out we know not to trust this cache when we load up
3365          * next time.
3366          */
3367         BTRFS_I(inode)->generation = 0;
3368         ret = btrfs_update_inode(trans, root, inode);
3369         if (ret) {
3370                 /*
3371                  * So theoretically we could recover from this by simply setting
3372                  * the super cache generation to 0 so we know to invalidate the
3373                  * cache, but then we'd have to keep track of the block groups
3374                  * that fail this way so we know we _have_ to reset this cache
3375                  * before the next commit or risk reading stale cache.  So to
3376                  * limit our exposure to horrible edge cases, let's just abort the
3377                  * transaction, this only happens in really bad situations
3378                  * anyway.
3379                  */
3380                 btrfs_abort_transaction(trans, root, ret);
3381                 goto out_put;
3382         }
3383         WARN_ON(ret);
3384
3385         if (i_size_read(inode) > 0) {
3386                 ret = btrfs_check_trunc_cache_free_space(root,
3387                                         &root->fs_info->global_block_rsv);
3388                 if (ret)
3389                         goto out_put;
3390
3391                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3392                 if (ret)
3393                         goto out_put;
3394         }
3395
3396         spin_lock(&block_group->lock);
3397         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3398             !btrfs_test_opt(root, SPACE_CACHE)) {
3399                 /*
3400                  * don't bother trying to write stuff out _if_
3401                  * a) we're not cached,
3402                  * b) we're using the nospace_cache mount option.
3403                  */
3404                 dcs = BTRFS_DC_WRITTEN;
3405                 spin_unlock(&block_group->lock);
3406                 goto out_put;
3407         }
3408         spin_unlock(&block_group->lock);
3409
3410         /*
3411          * We hit an ENOSPC when setting up the cache in this transaction; just
3412          * skip doing the setup, as we've already cleared the cache so we're safe.
3413          */
3414         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3415                 ret = -ENOSPC;
3416                 goto out_put;
3417         }
3418
3419         /*
3420          * Try to preallocate enough space based on how big the block group is.
3421          * Keep in mind this has to include any pinned space which could end up
3422          * taking up quite a bit since it's not folded into the other space
3423          * cache.
3424          */
3425         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3426         if (!num_pages)
3427                 num_pages = 1;
3428
3429         num_pages *= 16;
3430         num_pages *= PAGE_CACHE_SIZE;
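        /*
         * Sizing example: a 1GiB block group gives div_u64(1GiB, 256MiB)
         * = 4, so num_pages = 4 * 16 = 64 pages, i.e. 256K of cache
         * space with 4K pages; smaller block groups round up to the
         * 16-page minimum.
         */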
3431
3432         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3433         if (ret)
3434                 goto out_put;
3435
3436         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3437                                               num_pages, num_pages,
3438                                               &alloc_hint);
3439         /*
3440          * Our cache requires contiguous chunks so that we don't modify a bunch
3441          * of metadata or split extents when writing the cache out, which means
3442          * we can hit ENOSPC if we are heavily fragmented, in addition to normal
3443          * out of space conditions.  So if we hit this just skip setting up any
3444          * other block groups for this transaction, maybe we'll unpin enough
3445          * space the next time around.
3446          */
3447         if (!ret)
3448                 dcs = BTRFS_DC_SETUP;
3449         else if (ret == -ENOSPC)
3450                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3451         btrfs_free_reserved_data_space(inode, 0, num_pages);
3452
3453 out_put:
3454         iput(inode);
3455 out_free:
3456         btrfs_release_path(path);
3457 out:
3458         spin_lock(&block_group->lock);
3459         if (!ret && dcs == BTRFS_DC_SETUP)
3460                 block_group->cache_generation = trans->transid;
3461         block_group->disk_cache_state = dcs;
3462         spin_unlock(&block_group->lock);
3463
3464         return ret;
3465 }
3466
3467 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3468                             struct btrfs_root *root)
3469 {
3470         struct btrfs_block_group_cache *cache, *tmp;
3471         struct btrfs_transaction *cur_trans = trans->transaction;
3472         struct btrfs_path *path;
3473
3474         if (list_empty(&cur_trans->dirty_bgs) ||
3475             !btrfs_test_opt(root, SPACE_CACHE))
3476                 return 0;
3477
3478         path = btrfs_alloc_path();
3479         if (!path)
3480                 return -ENOMEM;
3481
3482         /* Could add new block groups, use _safe just in case */
3483         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3484                                  dirty_list) {
3485                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3486                         cache_save_setup(cache, trans, path);
3487         }
3488
3489         btrfs_free_path(path);
3490         return 0;
3491 }
3492
3493 /*
3494  * transaction commit does final block group cache writeback during a
3495  * critical section where nothing is allowed to change the FS.  This is
3496  * required in order for the cache to actually match the block group,
3497  * but can introduce a lot of latency into the commit.
3498  *
3499  * So, btrfs_start_dirty_block_groups is here to kick off block group
3500  * cache IO.  There's a chance we'll have to redo some of it if the
3501  * block group changes again during the commit, but it greatly reduces
3502  * the commit latency by getting rid of the easy block groups while
3503  * we're still allowing others to join the commit.
3504  */
3505 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3506                                    struct btrfs_root *root)
3507 {
3508         struct btrfs_block_group_cache *cache;
3509         struct btrfs_transaction *cur_trans = trans->transaction;
3510         int ret = 0;
3511         int should_put;
3512         struct btrfs_path *path = NULL;
3513         LIST_HEAD(dirty);
3514         struct list_head *io = &cur_trans->io_bgs;
3515         int num_started = 0;
3516         int loops = 0;
3517
3518         spin_lock(&cur_trans->dirty_bgs_lock);
3519         if (list_empty(&cur_trans->dirty_bgs)) {
3520                 spin_unlock(&cur_trans->dirty_bgs_lock);
3521                 return 0;
3522         }
3523         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3524         spin_unlock(&cur_trans->dirty_bgs_lock);
3525
3526 again:
3527         /*
3528          * make sure all the block groups on our dirty list actually
3529          * exist
3530          */
3531         btrfs_create_pending_block_groups(trans, root);
3532
3533         if (!path) {
3534                 path = btrfs_alloc_path();
3535                 if (!path)
3536                         return -ENOMEM;
3537         }
3538
3539         /*
3540          * cache_write_mutex is here only to save us from balance or automatic
3541          * removal of empty block groups deleting this block group while we are
3542          * writing out the cache
3543          */
3544         mutex_lock(&trans->transaction->cache_write_mutex);
3545         while (!list_empty(&dirty)) {
3546                 cache = list_first_entry(&dirty,
3547                                          struct btrfs_block_group_cache,
3548                                          dirty_list);
3549                 /*
3550                  * this can happen if something re-dirties a block
3551                  * group that is already under IO.  Just wait for it to
3552                  * finish and then do it all again
3553                  */
3554                 if (!list_empty(&cache->io_list)) {
3555                         list_del_init(&cache->io_list);
3556                         btrfs_wait_cache_io(root, trans, cache,
3557                                             &cache->io_ctl, path,
3558                                             cache->key.objectid);
3559                         btrfs_put_block_group(cache);
3560                 }
3561
3562
3563                 /*
3564                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3565                  * if it should update the cache_state.  Don't delete
3566                  * until after we wait.
3567                  *
3568                  * Since we're not running in the commit critical section
3569                  * we need the dirty_bgs_lock to protect from update_block_group
3570                  */
3571                 spin_lock(&cur_trans->dirty_bgs_lock);
3572                 list_del_init(&cache->dirty_list);
3573                 spin_unlock(&cur_trans->dirty_bgs_lock);
3574
3575                 should_put = 1;
3576
3577                 cache_save_setup(cache, trans, path);
3578
3579                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3580                         cache->io_ctl.inode = NULL;
3581                         ret = btrfs_write_out_cache(root, trans, cache, path);
3582                         if (ret == 0 && cache->io_ctl.inode) {
3583                                 num_started++;
3584                                 should_put = 0;
3585
3586                                 /*
3587                                  * the cache_write_mutex is protecting
3588                                  * the io_list
3589                                  */
3590                                 list_add_tail(&cache->io_list, io);
3591                         } else {
3592                                 /*
3593                                  * if we failed to write the cache, the
3594                                  * generation will be bad and life goes on
3595                                  */
3596                                 ret = 0;
3597                         }
3598                 }
3599                 if (!ret) {
3600                         ret = write_one_cache_group(trans, root, path, cache);
3601                         /*
3602                          * Our block group might still be attached to the list
3603                          * of new block groups in the transaction handle of some
3604                          * other task (struct btrfs_trans_handle->new_bgs). This
3605                          * means its block group item isn't yet in the extent
3606                          * tree. If this happens ignore the error, as we will
3607                          * try again later in the critical section of the
3608                          * transaction commit.
3609                          */
3610                         if (ret == -ENOENT) {
3611                                 ret = 0;
3612                                 spin_lock(&cur_trans->dirty_bgs_lock);
3613                                 if (list_empty(&cache->dirty_list)) {
3614                                         list_add_tail(&cache->dirty_list,
3615                                                       &cur_trans->dirty_bgs);
3616                                         btrfs_get_block_group(cache);
3617                                 }
3618                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3619                         } else if (ret) {
3620                                 btrfs_abort_transaction(trans, root, ret);
3621                         }
3622                 }
3623
3624                 /* if it's not on the io list, we need to put the block group */
3625                 if (should_put)
3626                         btrfs_put_block_group(cache);
3627
3628                 if (ret)
3629                         break;
3630
3631                 /*
3632                  * Avoid blocking other tasks for too long. It might even save
3633                  * us from writing caches for block groups that are going to be
3634                  * removed.
3635                  */
3636                 mutex_unlock(&trans->transaction->cache_write_mutex);
3637                 mutex_lock(&trans->transaction->cache_write_mutex);
3638         }
3639         mutex_unlock(&trans->transaction->cache_write_mutex);
3640
3641         /*
3642          * go through delayed refs for all the stuff we've just kicked off
3643          * and then loop back (just once)
3644          */
3645         ret = btrfs_run_delayed_refs(trans, root, 0);
3646         if (!ret && loops == 0) {
3647                 loops++;
3648                 spin_lock(&cur_trans->dirty_bgs_lock);
3649                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3650                 /*
3651                  * dirty_bgs_lock protects us from concurrent block group
3652                  * deletes too (not just cache_write_mutex).
3653                  */
3654                 if (!list_empty(&dirty)) {
3655                         spin_unlock(&cur_trans->dirty_bgs_lock);
3656                         goto again;
3657                 }
3658                 spin_unlock(&cur_trans->dirty_bgs_lock);
3659         }
3660
3661         btrfs_free_path(path);
3662         return ret;
3663 }
3664
3665 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3666                                    struct btrfs_root *root)
3667 {
3668         struct btrfs_block_group_cache *cache;
3669         struct btrfs_transaction *cur_trans = trans->transaction;
3670         int ret = 0;
3671         int should_put;
3672         struct btrfs_path *path;
3673         struct list_head *io = &cur_trans->io_bgs;
3674         int num_started = 0;
3675
3676         path = btrfs_alloc_path();
3677         if (!path)
3678                 return -ENOMEM;
3679
3680         /*
3681          * We don't need the lock here since we are protected by the transaction
3682          * commit.  We want to do the cache_save_setup first and then run the
3683          * delayed refs to make sure we have the best chance at doing this all
3684          * in one shot.
3685          */
3686         while (!list_empty(&cur_trans->dirty_bgs)) {
3687                 cache = list_first_entry(&cur_trans->dirty_bgs,
3688                                          struct btrfs_block_group_cache,
3689                                          dirty_list);
3690
3691                 /*
3692                  * this can happen if cache_save_setup re-dirties a block
3693                  * group that is already under IO.  Just wait for it to
3694                  * finish and then do it all again
3695                  */
3696                 if (!list_empty(&cache->io_list)) {
3697                         list_del_init(&cache->io_list);
3698                         btrfs_wait_cache_io(root, trans, cache,
3699                                             &cache->io_ctl, path,
3700                                             cache->key.objectid);
3701                         btrfs_put_block_group(cache);
3702                 }
3703
3704                 /*
3705                  * don't remove from the dirty list until after we've waited
3706                  * on any pending IO
3707                  */
3708                 list_del_init(&cache->dirty_list);
3709                 should_put = 1;
3710
3711                 cache_save_setup(cache, trans, path);
3712
3713                 if (!ret)
3714                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3715
3716                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3717                         cache->io_ctl.inode = NULL;
3718                         ret = btrfs_write_out_cache(root, trans, cache, path);
3719                         if (ret == 0 && cache->io_ctl.inode) {
3720                                 num_started++;
3721                                 should_put = 0;
3722                                 list_add_tail(&cache->io_list, io);
3723                         } else {
3724                                 /*
3725                                  * if we failed to write the cache, the
3726                                  * generation will be bad and life goes on
3727                                  */
3728                                 ret = 0;
3729                         }
3730                 }
3731                 if (!ret) {
3732                         ret = write_one_cache_group(trans, root, path, cache);
3733                         if (ret)
3734                                 btrfs_abort_transaction(trans, root, ret);
3735                 }
3736
3737                 /* if it's not on the io list, we need to put the block group */
3738                 if (should_put)
3739                         btrfs_put_block_group(cache);
3740         }
3741
3742         while (!list_empty(io)) {
3743                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3744                                          io_list);
3745                 list_del_init(&cache->io_list);
3746                 btrfs_wait_cache_io(root, trans, cache,
3747                                     &cache->io_ctl, path, cache->key.objectid);
3748                 btrfs_put_block_group(cache);
3749         }
3750
3751         btrfs_free_path(path);
3752         return ret;
3753 }
3754
3755 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3756 {
3757         struct btrfs_block_group_cache *block_group;
3758         int readonly = 0;
3759
3760         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3761         if (!block_group || block_group->ro)
3762                 readonly = 1;
3763         if (block_group)
3764                 btrfs_put_block_group(block_group);
3765         return readonly;
3766 }
3767
3768 static const char *alloc_name(u64 flags)
3769 {
3770         switch (flags) {
3771         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3772                 return "mixed";
3773         case BTRFS_BLOCK_GROUP_METADATA:
3774                 return "metadata";
3775         case BTRFS_BLOCK_GROUP_DATA:
3776                 return "data";
3777         case BTRFS_BLOCK_GROUP_SYSTEM:
3778                 return "system";
3779         default:
3780                 WARN_ON(1);
3781                 return "invalid-combination";
3782         }
3783 }
3784
3785 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3786                              u64 total_bytes, u64 bytes_used,
3787                              struct btrfs_space_info **space_info)
3788 {
3789         struct btrfs_space_info *found;
3790         int i;
3791         int factor;
3792         int ret;
3793
3794         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3795                      BTRFS_BLOCK_GROUP_RAID10))
3796                 factor = 2;
3797         else
3798                 factor = 1;
3799
3800         found = __find_space_info(info, flags);
3801         if (found) {
3802                 spin_lock(&found->lock);
3803                 found->total_bytes += total_bytes;
3804                 found->disk_total += total_bytes * factor;
3805                 found->bytes_used += bytes_used;
3806                 found->disk_used += bytes_used * factor;
3807                 if (total_bytes > 0)
3808                         found->full = 0;
3809                 spin_unlock(&found->lock);
3810                 *space_info = found;
3811                 return 0;
3812         }
3813         found = kzalloc(sizeof(*found), GFP_NOFS);
3814         if (!found)
3815                 return -ENOMEM;
3816
3817         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3818         if (ret) {
3819                 kfree(found);
3820                 return ret;
3821         }
3822
3823         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3824                 INIT_LIST_HEAD(&found->block_groups[i]);
3825         init_rwsem(&found->groups_sem);
3826         spin_lock_init(&found->lock);
3827         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3828         found->total_bytes = total_bytes;
3829         found->disk_total = total_bytes * factor;
3830         found->bytes_used = bytes_used;
3831         found->disk_used = bytes_used * factor;
3832         found->bytes_pinned = 0;
3833         found->bytes_reserved = 0;
3834         found->bytes_readonly = 0;
3835         found->bytes_may_use = 0;
3836         found->full = 0;
3837         found->max_extent_size = 0;
3838         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3839         found->chunk_alloc = 0;
3840         found->flush = 0;
3841         init_waitqueue_head(&found->wait);
3842         INIT_LIST_HEAD(&found->ro_bgs);
3843
3844         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3845                                     info->space_info_kobj, "%s",
3846                                     alloc_name(found->flags));
3847         if (ret) {
3848                 kfree(found);
3849                 return ret;
3850         }
3851
3852         *space_info = found;
3853         list_add_rcu(&found->list, &info->space_info);
3854         if (flags & BTRFS_BLOCK_GROUP_DATA)
3855                 info->data_sinfo = found;
3856
3857         return ret;
3858 }
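
/*
 * The factor above models on-disk duplication: with DUP, RAID1 or
 * RAID10 every logical byte occupies two bytes on disk, so disk_total
 * and disk_used run at twice the logical numbers; other profiles
 * (including parity RAID, which is approximated here) count 1:1.
 */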
3859
3860 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3861 {
3862         u64 extra_flags = chunk_to_extended(flags) &
3863                                 BTRFS_EXTENDED_PROFILE_MASK;
3864
3865         write_seqlock(&fs_info->profiles_lock);
3866         if (flags & BTRFS_BLOCK_GROUP_DATA)
3867                 fs_info->avail_data_alloc_bits |= extra_flags;
3868         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3869                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3870         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3871                 fs_info->avail_system_alloc_bits |= extra_flags;
3872         write_sequnlock(&fs_info->profiles_lock);
3873 }
3874
3875 /*
3876  * returns target flags in extended format or 0 if restripe for this
3877  * chunk_type is not in progress
3878  *
3879  * should be called with either volume_mutex or balance_lock held
3880  */
3881 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3882 {
3883         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3884         u64 target = 0;
3885
3886         if (!bctl)
3887                 return 0;
3888
3889         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3890             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3891                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3892         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3893                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3894                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3895         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3896                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3897                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3898         }
3899
3900         return target;
3901 }
3902
3903 /*
3904  * @flags: available profiles in extended format (see ctree.h)
3905  *
3906  * Returns reduced profile in chunk format.  If profile changing is in
3907  * progress (either running or paused) picks the target profile (if it's
3908  * already available), otherwise falls back to plain reducing.
3909  */
3910 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3911 {
3912         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3913         u64 target;
3914         u64 raid_type;
3915         u64 allowed = 0;
3916
3917         /*
3918          * see if restripe for this chunk_type is in progress, if so
3919          * try to reduce to the target profile
3920          */
3921         spin_lock(&root->fs_info->balance_lock);
3922         target = get_restripe_target(root->fs_info, flags);
3923         if (target) {
3924                 /* pick target profile only if it's already available */
3925                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3926                         spin_unlock(&root->fs_info->balance_lock);
3927                         return extended_to_chunk(target);
3928                 }
3929         }
3930         spin_unlock(&root->fs_info->balance_lock);
3931
3932         /* First, mask out the RAID levels which aren't possible */
3933         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3934                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3935                         allowed |= btrfs_raid_group[raid_type];
3936         }
3937         allowed &= flags;
3938
3939         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3940                 allowed = BTRFS_BLOCK_GROUP_RAID6;
3941         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3942                 allowed = BTRFS_BLOCK_GROUP_RAID5;
3943         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3944                 allowed = BTRFS_BLOCK_GROUP_RAID10;
3945         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3946                 allowed = BTRFS_BLOCK_GROUP_RAID1;
3947         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3948                 allowed = BTRFS_BLOCK_GROUP_RAID0;
3949
3950         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3951
3952         return extended_to_chunk(flags | allowed);
3953 }
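
/*
 * Example: if flags advertises both RAID1 and RAID0 and enough rw
 * devices are present, the ladder above keeps only RAID1 (the most
 * redundant allowed profile); on a single-device filesystem both get
 * masked off and the result reduces to the SINGLE profile.
 */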
3954
3955 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3956 {
3957         unsigned seq;
3958         u64 flags;
3959
3960         do {
3961                 flags = orig_flags;
3962                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3963
3964                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3965                         flags |= root->fs_info->avail_data_alloc_bits;
3966                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3967                         flags |= root->fs_info->avail_system_alloc_bits;
3968                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3969                         flags |= root->fs_info->avail_metadata_alloc_bits;
3970         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3971
3972         return btrfs_reduce_alloc_profile(root, flags);
3973 }
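
/*
 * The loop above is the usual seqlock read side: recompute flags from
 * orig_flags each pass and retry whenever a writer touched the
 * avail_*_alloc_bits between read_seqbegin() and read_seqretry().
 */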
3974
3975 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3976 {
3977         u64 flags;
3978         u64 ret;
3979
3980         if (data)
3981                 flags = BTRFS_BLOCK_GROUP_DATA;
3982         else if (root == root->fs_info->chunk_root)
3983                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3984         else
3985                 flags = BTRFS_BLOCK_GROUP_METADATA;
3986
3987         ret = get_alloc_profile(root, flags);
3988         return ret;
3989 }
3990
3991 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
3992 {
3993         struct btrfs_space_info *data_sinfo;
3994         struct btrfs_root *root = BTRFS_I(inode)->root;
3995         struct btrfs_fs_info *fs_info = root->fs_info;
3996         u64 used;
3997         int ret = 0;
3998         int need_commit = 2;
3999         int have_pinned_space;
4000
4001         /* make sure bytes are sectorsize aligned */
4002         bytes = ALIGN(bytes, root->sectorsize);
4003
4004         if (btrfs_is_free_space_inode(inode)) {
4005                 need_commit = 0;
4006                 ASSERT(current->journal_info);
4007         }
4008
4009         data_sinfo = fs_info->data_sinfo;
4010         if (!data_sinfo)
4011                 goto alloc;
4012
4013 again:
4014         /* make sure we have enough space to handle the data first */
4015         spin_lock(&data_sinfo->lock);
4016         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4017                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4018                 data_sinfo->bytes_may_use;
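        /*
         * "used" is everything already spoken for: allocated extents,
         * in-flight reservations, pinned-but-not-yet-freed space,
         * read-only space and the optimistic bytes_may_use; only what
         * is left of total_bytes can back this new reservation.
         */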
4019
4020         if (used + bytes > data_sinfo->total_bytes) {
4021                 struct btrfs_trans_handle *trans;
4022
4023                 /*
4024                  * if we don't have enough free bytes in this space then we need
4025                  * to alloc a new chunk.
4026                  */
4027                 if (!data_sinfo->full) {
4028                         u64 alloc_target;
4029
4030                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4031                         spin_unlock(&data_sinfo->lock);
4032 alloc:
4033                         alloc_target = btrfs_get_alloc_profile(root, 1);
4034                         /*
4035                          * It is ugly that we don't call the nolock join
4036                          * transaction for the free space inode case here.
4037                          * But it is safe because we only do the data space
4038                          * reservation for the free space cache in the
4039                          * transaction context; the common join transaction
4040                          * just increases the counter of the current
4041                          * transaction handle and doesn't try to acquire
4042                          * the trans_lock of the fs.
4043                          */
4044                         trans = btrfs_join_transaction(root);
4045                         if (IS_ERR(trans))
4046                                 return PTR_ERR(trans);
4047
4048                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4049                                              alloc_target,
4050                                              CHUNK_ALLOC_NO_FORCE);
4051                         btrfs_end_transaction(trans, root);
4052                         if (ret < 0) {
4053                                 if (ret != -ENOSPC)
4054                                         return ret;
4055                                 else {
4056                                         have_pinned_space = 1;
4057                                         goto commit_trans;
4058                                 }
4059                         }
4060
4061                         if (!data_sinfo)
4062                                 data_sinfo = fs_info->data_sinfo;
4063
4064                         goto again;
4065                 }
4066
4067                 /*
4068                  * If we don't have enough pinned space to deal with this
4069                  * allocation, and no chunk was removed in the current
4070                  * transaction, don't bother committing the transaction.
4071                  */
4072                 have_pinned_space = percpu_counter_compare(
4073                         &data_sinfo->total_bytes_pinned,
4074                         used + bytes - data_sinfo->total_bytes);
4075                 spin_unlock(&data_sinfo->lock);
4076
4077                 /* commit the current transaction and try again */
4078 commit_trans:
4079                 if (need_commit &&
4080                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4081                         need_commit--;
4082
4083                         if (need_commit > 0)
4084                                 btrfs_wait_ordered_roots(fs_info, -1);
4085
4086                         trans = btrfs_join_transaction(root);
4087                         if (IS_ERR(trans))
4088                                 return PTR_ERR(trans);
4089                         if (have_pinned_space >= 0 ||
4090                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4091                                      &trans->transaction->flags) ||
4092                             need_commit > 0) {
4093                                 ret = btrfs_commit_transaction(trans, root);
4094                                 if (ret)
4095                                         return ret;
4096                                 /*
4097                                  * make sure that all running delayed iput are
4098                                  * done
4099                                  */
4100                                 down_write(&root->fs_info->delayed_iput_sem);
4101                                 up_write(&root->fs_info->delayed_iput_sem);
4102                                 goto again;
4103                         } else {
4104                                 btrfs_end_transaction(trans, root);
4105                         }
4106                 }
4107
4108                 trace_btrfs_space_reservation(root->fs_info,
4109                                               "space_info:enospc",
4110                                               data_sinfo->flags, bytes, 1);
4111                 return -ENOSPC;
4112         }
4113         data_sinfo->bytes_may_use += bytes;
4114         trace_btrfs_space_reservation(root->fs_info, "space_info",
4115                                       data_sinfo->flags, bytes, 1);
4116         spin_unlock(&data_sinfo->lock);
4117
4118         return ret;
4119 }
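
/*
 * Illustrative sketch, not part of the kernel source: the admission test
 * above is plain accounting -- a new data reservation fits only if all
 * bytes already spoken for, plus the request, stay within total_bytes.
 * A standalone model with hypothetical field names:
 */
#include <stdbool.h>
#include <stdint.h>

struct space_counters {
	uint64_t total_bytes;
	uint64_t bytes_used, bytes_reserved, bytes_pinned;
	uint64_t bytes_readonly, bytes_may_use;
};

static bool data_reservation_fits(const struct space_counters *s,
				  uint64_t bytes)
{
	uint64_t used = s->bytes_used + s->bytes_reserved + s->bytes_pinned +
			s->bytes_readonly + s->bytes_may_use;

	/* Mirrors "used + bytes > data_sinfo->total_bytes" above, negated. */
	return used + bytes <= s->total_bytes;
}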
4120
4121 /*
4122  * New check_data_free_space() with the ability for precise data reservation.
4123  * It will replace the old btrfs_check_data_free_space(); to keep the patch
4124  * split manageable, the new function is added first and the old one replaced later.
4125  */
4126 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4127 {
4128         struct btrfs_root *root = BTRFS_I(inode)->root;
4129         int ret;
4130
4131         /* align the range */
4132         len = round_up(start + len, root->sectorsize) -
4133               round_down(start, root->sectorsize);
4134         start = round_down(start, root->sectorsize);
4135
4136         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4137         if (ret < 0)
4138                 return ret;
4139
4140         /*
4141          * Use the new btrfs_qgroup_reserve_data() to reserve precise data space
4142          *
4143          * TODO: Find a good method to avoid reserving data space for a NOCOW
4144          * range without hurting performance when quotas are disabled.
4145          */
4146         ret = btrfs_qgroup_reserve_data(inode, start, len);
4147         return ret;
4148 }
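
/*
 * Illustrative sketch, not part of the kernel source: the round_up/
 * round_down pair above expands [start, start + len) to whole sectors.
 * With a power-of-two sectorsize this is pure mask arithmetic:
 */
#include <assert.h>
#include <stdint.h>

static void align_range(uint64_t *start, uint64_t *len, uint64_t sectorsize)
{
	uint64_t end = *start + *len;

	end = (end + sectorsize - 1) & ~(sectorsize - 1);  /* round_up */
	*start &= ~(sectorsize - 1);			   /* round_down */
	*len = end - *start;
}

int main(void)
{
	uint64_t start = 5000, len = 100;

	align_range(&start, &len, 4096);
	/* [5000, 5100) grows to the single sector [4096, 8192). */
	assert(start == 4096 && len == 4096);
	return 0;
}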
4149
4150 /*
4151  * Called if we need to clear a data reservation for this inode,
4152  * normally in an error case.
4153  *
4154  * This one will *NOT* use the accurate qgroup reserved space API; it is only
4155  * for callers that cannot sleep and are sure it won't affect qgroup reserved
4156  * space, like clear_bit_hook().
4157  */
4158 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4159                                             u64 len)
4160 {
4161         struct btrfs_root *root = BTRFS_I(inode)->root;
4162         struct btrfs_space_info *data_sinfo;
4163
4164         /* Make sure the range is aligned to sectorsize */
4165         len = round_up(start + len, root->sectorsize) -
4166               round_down(start, root->sectorsize);
4167         start = round_down(start, root->sectorsize);
4168
4169         data_sinfo = root->fs_info->data_sinfo;
4170         spin_lock(&data_sinfo->lock);
4171         if (WARN_ON(data_sinfo->bytes_may_use < len))
4172                 data_sinfo->bytes_may_use = 0;
4173         else
4174                 data_sinfo->bytes_may_use -= len;
4175         trace_btrfs_space_reservation(root->fs_info, "space_info",
4176                                       data_sinfo->flags, len, 0);
4177         spin_unlock(&data_sinfo->lock);
4178 }
4179
4180 /*
4181  * Called if we need to clear a data reservation for this inode,
4182  * normally in an error case.
4183  *
4184  * This one will handle the per-inode data rsv map for the accurate reserved
4185  * space framework.
4186  */
4187 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4188 {
4189         btrfs_free_reserved_data_space_noquota(inode, start, len);
4190         btrfs_qgroup_free_data(inode, start, len);
4191 }
4192
4193 static void force_metadata_allocation(struct btrfs_fs_info *info)
4194 {
4195         struct list_head *head = &info->space_info;
4196         struct btrfs_space_info *found;
4197
4198         rcu_read_lock();
4199         list_for_each_entry_rcu(found, head, list) {
4200                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4201                         found->force_alloc = CHUNK_ALLOC_FORCE;
4202         }
4203         rcu_read_unlock();
4204 }
4205
4206 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4207 {
4208         return (global->size << 1);
4209 }
4210
4211 static int should_alloc_chunk(struct btrfs_root *root,
4212                               struct btrfs_space_info *sinfo, int force)
4213 {
4214         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4215         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4216         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4217         u64 thresh;
4218
4219         if (force == CHUNK_ALLOC_FORCE)
4220                 return 1;
4221
4222         /*
4223          * We need to take into account the global rsv because for all intents
4224          * and purposes it's used space.  Don't worry about locking the
4225          * global_rsv, it doesn't change except when the transaction commits.
4226          */
4227         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4228                 num_allocated += calc_global_rsv_need_space(global_rsv);
4229
4230         /*
4231          * in limited mode, we want to have some free space up to
4232          * about 1% of the FS size.
4233          */
4234         if (force == CHUNK_ALLOC_LIMITED) {
4235                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4236                 thresh = max_t(u64, 64 * 1024 * 1024,
4237                                div_factor_fine(thresh, 1));
4238
4239                 if (num_bytes - num_allocated < thresh)
4240                         return 1;
4241         }
4242
4243         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4244                 return 0;
4245         return 1;
4246 }
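
/*
 * Illustrative sketch, not part of the kernel source: the thresholds
 * above, mirroring the semantics of div_factor()/div_factor_fine() from
 * btrfs's math.h (num * factor / 10 and num * factor / 100).  In limited
 * mode we allocate while free space is under max(64M, 1% of the FS); in
 * the unforced case we allocate once usage passes roughly 80%:
 */
#include <stdint.h>

static uint64_t div_factor(uint64_t num, int factor)	   /* tenths */
{
	return num * factor / 10;
}

static uint64_t div_factor_fine(uint64_t num, int factor)  /* percent */
{
	return num * factor / 100;
}

static int limited_should_alloc(uint64_t fs_bytes, uint64_t num_bytes,
				uint64_t num_allocated)
{
	uint64_t thresh = div_factor_fine(fs_bytes, 1);	   /* 1% of the FS */

	if (thresh < 64ULL * 1024 * 1024)
		thresh = 64ULL * 1024 * 1024;
	return num_bytes - num_allocated < thresh;
}

static int unforced_should_alloc(uint64_t num_bytes, uint64_t num_allocated)
{
	/* Allocate once ~80% of the space (minus a 2M slack) is consumed. */
	return num_allocated + 2ULL * 1024 * 1024 >= div_factor(num_bytes, 8);
}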
4247
4248 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4249 {
4250         u64 num_dev;
4251
4252         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4253                     BTRFS_BLOCK_GROUP_RAID0 |
4254                     BTRFS_BLOCK_GROUP_RAID5 |
4255                     BTRFS_BLOCK_GROUP_RAID6))
4256                 num_dev = root->fs_info->fs_devices->rw_devices;
4257         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4258                 num_dev = 2;
4259         else
4260                 num_dev = 1;    /* DUP or single */
4261
4262         return num_dev;
4263 }
4264
4265 /*
4266  * Reserve space in the system space_info for the device items we will need
4267  * to update and the chunk item we will need to add or remove when allocating
4268  * or removing a chunk.
4269  */
4270 void check_system_chunk(struct btrfs_trans_handle *trans,
4271                         struct btrfs_root *root,
4272                         u64 type)
4273 {
4274         struct btrfs_space_info *info;
4275         u64 left;
4276         u64 thresh;
4277         int ret = 0;
4278         u64 num_devs;
4279
4280         /*
4281          * Needed because we can end up allocating a system chunk and we need
4282          * an atomic and race-free space reservation in the chunk block reserve.
4283          */
4284         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4285
4286         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4287         spin_lock(&info->lock);
4288         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4289                 info->bytes_reserved - info->bytes_readonly -
4290                 info->bytes_may_use;
4291         spin_unlock(&info->lock);
4292
4293         num_devs = get_profile_num_devs(root, type);
4294
4295         /* num_devs device items to update and 1 chunk item to add or remove */
4296         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4297                 btrfs_calc_trans_metadata_size(root, 1);
4298
4299         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4300                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4301                         left, thresh, type);
4302                 dump_space_info(info, 0, 0);
4303         }
4304
4305         if (left < thresh) {
4306                 u64 flags;
4307
4308                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4309                 /*
4310                  * Ignore failure to create system chunk. We might end up not
4311                  * needing it, as we might not need to COW all nodes/leafs from
4312                  * the paths we visit in the chunk tree (they were already COWed
4313                  * or created in the current transaction for example).
4314                  */
4315                 ret = btrfs_alloc_chunk(trans, root, flags);
4316         }
4317
4318         if (!ret) {
4319                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4320                                           &root->fs_info->chunk_block_rsv,
4321                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4322                 if (!ret)
4323                         trans->chunk_bytes_reserved += thresh;
4324         }
4325 }
4326
4327 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4328                           struct btrfs_root *extent_root, u64 flags, int force)
4329 {
4330         struct btrfs_space_info *space_info;
4331         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4332         int wait_for_alloc = 0;
4333         int ret = 0;
4334
4335         /* Don't re-enter if we're already allocating a chunk */
4336         if (trans->allocating_chunk)
4337                 return -ENOSPC;
4338
4339         space_info = __find_space_info(extent_root->fs_info, flags);
4340         if (!space_info) {
4341                 ret = update_space_info(extent_root->fs_info, flags,
4342                                         0, 0, &space_info);
4343                 BUG_ON(ret); /* -ENOMEM */
4344         }
4345         BUG_ON(!space_info); /* Logic error */
4346
4347 again:
4348         spin_lock(&space_info->lock);
4349         if (force < space_info->force_alloc)
4350                 force = space_info->force_alloc;
4351         if (space_info->full) {
4352                 if (should_alloc_chunk(extent_root, space_info, force))
4353                         ret = -ENOSPC;
4354                 else
4355                         ret = 0;
4356                 spin_unlock(&space_info->lock);
4357                 return ret;
4358         }
4359
4360         if (!should_alloc_chunk(extent_root, space_info, force)) {
4361                 spin_unlock(&space_info->lock);
4362                 return 0;
4363         } else if (space_info->chunk_alloc) {
4364                 wait_for_alloc = 1;
4365         } else {
4366                 space_info->chunk_alloc = 1;
4367         }
4368
4369         spin_unlock(&space_info->lock);
4370
4371         mutex_lock(&fs_info->chunk_mutex);
4372
4373         /*
4374          * The chunk_mutex is held throughout the entirety of a chunk
4375          * allocation, so once we've acquired the chunk_mutex we know that the
4376          * other guy is done and we need to recheck and see if we should
4377          * allocate.
4378          */
4379         if (wait_for_alloc) {
4380                 mutex_unlock(&fs_info->chunk_mutex);
4381                 wait_for_alloc = 0;
4382                 goto again;
4383         }
4384
4385         trans->allocating_chunk = true;
4386
4387         /*
4388          * If we have mixed data/metadata chunks we want to make sure we keep
4389          * allocating mixed chunks instead of individual chunks.
4390          */
4391         if (btrfs_mixed_space_info(space_info))
4392                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4393
4394         /*
4395          * if we're doing a data chunk, go ahead and make sure that
4396          * we keep a reasonable number of metadata chunks allocated in the
4397          * FS as well.
4398          */
4399         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4400                 fs_info->data_chunk_allocations++;
4401                 if (!(fs_info->data_chunk_allocations %
4402                       fs_info->metadata_ratio))
4403                         force_metadata_allocation(fs_info);
4404         }
4405
4406         /*
4407          * Check if we have enough space in the SYSTEM chunk because we may need
4408          * to update devices.
4409          */
4410         check_system_chunk(trans, extent_root, flags);
4411
4412         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4413         trans->allocating_chunk = false;
4414
4415         spin_lock(&space_info->lock);
4416         if (ret < 0 && ret != -ENOSPC)
4417                 goto out;
4418         if (ret)
4419                 space_info->full = 1;
4420         else
4421                 ret = 1;
4422
4423         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4424 out:
4425         space_info->chunk_alloc = 0;
4426         spin_unlock(&space_info->lock);
4427         mutex_unlock(&fs_info->chunk_mutex);
4428         /*
4429          * When we allocate a new chunk we reserve space in the chunk block
4430          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4431          * add new nodes/leafs to it if we end up needing to do it when
4432          * inserting the chunk item and updating device items as part of the
4433          * second phase of chunk allocation, performed by
4434          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4435          * large number of new block groups to create in our transaction
4436          * handle's new_bgs list to avoid exhausting the chunk block reserve
4437          * in extreme cases - like having a single transaction create many new
4438          * block groups when starting to write out the free space caches of all
4439          * the block groups that were made dirty during the lifetime of the
4440          * transaction.
4441          */
4442         if (trans->can_flush_pending_bgs &&
4443             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4444                 btrfs_create_pending_block_groups(trans, trans->root);
4445                 btrfs_trans_release_chunk_metadata(trans);
4446         }
4447         return ret;
4448 }
4449
4450 static int can_overcommit(struct btrfs_root *root,
4451                           struct btrfs_space_info *space_info, u64 bytes,
4452                           enum btrfs_reserve_flush_enum flush)
4453 {
4454         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4455         u64 profile = btrfs_get_alloc_profile(root, 0);
4456         u64 space_size;
4457         u64 avail;
4458         u64 used;
4459
4460         used = space_info->bytes_used + space_info->bytes_reserved +
4461                 space_info->bytes_pinned + space_info->bytes_readonly;
4462
4463         /*
4464          * We only want to allow overcommitting if we have lots of actual space
4465          * free, but if we don't have enough space to handle the global reserve
4466          * space then we could end up having a real enospc problem when trying
4467          * to allocate a chunk or some other such important allocation.
4468          */
4469         spin_lock(&global_rsv->lock);
4470         space_size = calc_global_rsv_need_space(global_rsv);
4471         spin_unlock(&global_rsv->lock);
4472         if (used + space_size >= space_info->total_bytes)
4473                 return 0;
4474
4475         used += space_info->bytes_may_use;
4476
4477         spin_lock(&root->fs_info->free_chunk_lock);
4478         avail = root->fs_info->free_chunk_space;
4479         spin_unlock(&root->fs_info->free_chunk_lock);
4480
4481         /*
4482          * If we have dup, raid1 or raid10 then only half of the free
4483          * space is actually usable.  For raid56, the space info used
4484          * doesn't include the parity drive, so we don't have to
4485          * change the math.
4486          */
4487         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4488                        BTRFS_BLOCK_GROUP_RAID1 |
4489                        BTRFS_BLOCK_GROUP_RAID10))
4490                 avail >>= 1;
4491
4492         /*
4493          * If we aren't flushing all things, let us overcommit up to
4494          * half of the space. If we can flush everything, don't let us
4495          * overcommit too much, only up to 1/8 of the space.
4496          */
4497         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4498                 avail >>= 3;
4499         else
4500                 avail >>= 1;
4501
4502         if (used + bytes < space_info->total_bytes + avail)
4503                 return 1;
4504         return 0;
4505 }
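
/*
 * Illustrative sketch, not part of the kernel source: can_overcommit()
 * reduced to its arithmetic.  Mirrored profiles halve the usable free
 * chunk space, and the permitted overcommit is 1/8 of what remains when
 * a full flush is possible, 1/2 otherwise:
 */
#include <stdbool.h>
#include <stdint.h>

static bool may_overcommit(uint64_t used, uint64_t total_bytes,
			   uint64_t free_chunk_space, uint64_t bytes,
			   bool mirrored_profile, bool can_flush_all)
{
	uint64_t avail = free_chunk_space;

	if (mirrored_profile)		/* DUP/RAID1/RAID10: half usable */
		avail >>= 1;
	avail >>= can_flush_all ? 3 : 1;	/* 1/8 vs 1/2 */

	return used + bytes < total_bytes + avail;
}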
4506
4507 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4508                                          unsigned long nr_pages, int nr_items)
4509 {
4510         struct super_block *sb = root->fs_info->sb;
4511
4512         if (down_read_trylock(&sb->s_umount)) {
4513                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4514                 up_read(&sb->s_umount);
4515         } else {
4516                 /*
4517                  * We needn't worry about the filesystem going from r/w to r/o
4518                  * even though we don't acquire the ->s_umount mutex, because
4519                  * the filesystem should guarantee the delalloc inodes list is
4520                  * empty once it is read-only (all dirty pages have been
4521                  * written to disk).
4522                  */
4523                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4524                 if (!current->journal_info)
4525                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4526         }
4527 }
4528
4529 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4530 {
4531         u64 bytes;
4532         int nr;
4533
4534         bytes = btrfs_calc_trans_metadata_size(root, 1);
4535         nr = (int)div64_u64(to_reclaim, bytes);
4536         if (!nr)
4537                 nr = 1;
4538         return nr;
4539 }
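
/*
 * Illustrative sketch, not part of the kernel source: one reclaim "item"
 * is costed at the metadata size of a single tree operation, so the item
 * count is an integer division clamped to at least one:
 */
#include <stdint.h>

static int reclaim_items(uint64_t to_reclaim, uint64_t bytes_per_item)
{
	int nr = (int)(to_reclaim / bytes_per_item);

	return nr ? nr : 1;	/* always flush at least one item */
}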
4540
4541 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4542
4543 /*
4544  * shrink metadata reservation for delalloc
4545  */
4546 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4547                             bool wait_ordered)
4548 {
4549         struct btrfs_block_rsv *block_rsv;
4550         struct btrfs_space_info *space_info;
4551         struct btrfs_trans_handle *trans;
4552         u64 delalloc_bytes;
4553         u64 max_reclaim;
4554         long time_left;
4555         unsigned long nr_pages;
4556         int loops;
4557         int items;
4558         enum btrfs_reserve_flush_enum flush;
4559
4560         /* Calc the number of items we need to flush for this space reservation */
4561         items = calc_reclaim_items_nr(root, to_reclaim);
4562         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4563
4564         trans = (struct btrfs_trans_handle *)current->journal_info;
4565         block_rsv = &root->fs_info->delalloc_block_rsv;
4566         space_info = block_rsv->space_info;
4567
4568         delalloc_bytes = percpu_counter_sum_positive(
4569                                                 &root->fs_info->delalloc_bytes);
4570         if (delalloc_bytes == 0) {
4571                 if (trans)
4572                         return;
4573                 if (wait_ordered)
4574                         btrfs_wait_ordered_roots(root->fs_info, items);
4575                 return;
4576         }
4577
4578         loops = 0;
4579         while (delalloc_bytes && loops < 3) {
4580                 max_reclaim = min(delalloc_bytes, to_reclaim);
4581                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4582                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4583                 /*
4584                  * We need to wait for the async pages to actually start before
4585                  * we do anything.
4586                  */
4587                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4588                 if (!max_reclaim)
4589                         goto skip_async;
4590
4591                 if (max_reclaim <= nr_pages)
4592                         max_reclaim = 0;
4593                 else
4594                         max_reclaim -= nr_pages;
4595
4596                 wait_event(root->fs_info->async_submit_wait,
4597                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4598                            (int)max_reclaim);
4599 skip_async:
4600                 if (!trans)
4601                         flush = BTRFS_RESERVE_FLUSH_ALL;
4602                 else
4603                         flush = BTRFS_RESERVE_NO_FLUSH;
4604                 spin_lock(&space_info->lock);
4605                 if (can_overcommit(root, space_info, orig, flush)) {
4606                         spin_unlock(&space_info->lock);
4607                         break;
4608                 }
4609                 spin_unlock(&space_info->lock);
4610
4611                 loops++;
4612                 if (wait_ordered && !trans) {
4613                         btrfs_wait_ordered_roots(root->fs_info, items);
4614                 } else {
4615                         time_left = schedule_timeout_killable(1);
4616                         if (time_left)
4617                                 break;
4618                 }
4619                 delalloc_bytes = percpu_counter_sum_positive(
4620                                                 &root->fs_info->delalloc_bytes);
4621         }
4622 }
4623
4624 /**
4625  * may_commit_transaction - possibly commit the transaction if it's ok to
4626  * @root - the root we're allocating for
4627  * @bytes - the number of bytes we want to reserve
4628  * @force - force the commit
4629  *
4630  * This will check to make sure that committing the transaction will actually
4631  * get us somewhere and then commit the transaction if it does.  Otherwise it
4632  * will return -ENOSPC.
4633  */
4634 static int may_commit_transaction(struct btrfs_root *root,
4635                                   struct btrfs_space_info *space_info,
4636                                   u64 bytes, int force)
4637 {
4638         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4639         struct btrfs_trans_handle *trans;
4640
4641         trans = (struct btrfs_trans_handle *)current->journal_info;
4642         if (trans)
4643                 return -EAGAIN;
4644
4645         if (force)
4646                 goto commit;
4647
4648         /* See if there is enough pinned space to make this reservation */
4649         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4650                                    bytes) >= 0)
4651                 goto commit;
4652
4653         /*
4654          * See if there is some space in the delayed insertion reservation for
4655          * this reservation.
4656          */
4657         if (space_info != delayed_rsv->space_info)
4658                 return -ENOSPC;
4659
4660         spin_lock(&delayed_rsv->lock);
4661         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4662                                    bytes - delayed_rsv->size) >= 0) {
4663                 spin_unlock(&delayed_rsv->lock);
4664                 return -ENOSPC;
4665         }
4666         spin_unlock(&delayed_rsv->lock);
4667
4668 commit:
4669         trans = btrfs_join_transaction(root);
4670         if (IS_ERR(trans))
4671                 return -ENOSPC;
4672
4673         return btrfs_commit_transaction(trans, root);
4674 }
4675
4676 enum flush_state {
4677         FLUSH_DELAYED_ITEMS_NR  =       1,
4678         FLUSH_DELAYED_ITEMS     =       2,
4679         FLUSH_DELALLOC          =       3,
4680         FLUSH_DELALLOC_WAIT     =       4,
4681         ALLOC_CHUNK             =       5,
4682         COMMIT_TRANS            =       6,
4683 };
4684
4685 static int flush_space(struct btrfs_root *root,
4686                        struct btrfs_space_info *space_info, u64 num_bytes,
4687                        u64 orig_bytes, int state)
4688 {
4689         struct btrfs_trans_handle *trans;
4690         int nr;
4691         int ret = 0;
4692
4693         switch (state) {
4694         case FLUSH_DELAYED_ITEMS_NR:
4695         case FLUSH_DELAYED_ITEMS:
4696                 if (state == FLUSH_DELAYED_ITEMS_NR)
4697                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4698                 else
4699                         nr = -1;
4700
4701                 trans = btrfs_join_transaction(root);
4702                 if (IS_ERR(trans)) {
4703                         ret = PTR_ERR(trans);
4704                         break;
4705                 }
4706                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4707                 btrfs_end_transaction(trans, root);
4708                 break;
4709         case FLUSH_DELALLOC:
4710         case FLUSH_DELALLOC_WAIT:
4711                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4712                                 state == FLUSH_DELALLOC_WAIT);
4713                 break;
4714         case ALLOC_CHUNK:
4715                 trans = btrfs_join_transaction(root);
4716                 if (IS_ERR(trans)) {
4717                         ret = PTR_ERR(trans);
4718                         break;
4719                 }
4720                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4721                                      btrfs_get_alloc_profile(root, 0),
4722                                      CHUNK_ALLOC_NO_FORCE);
4723                 btrfs_end_transaction(trans, root);
4724                 if (ret == -ENOSPC)
4725                         ret = 0;
4726                 break;
4727         case COMMIT_TRANS:
4728                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4729                 break;
4730         default:
4731                 ret = -ENOSPC;
4732                 break;
4733         }
4734
4735         return ret;
4736 }
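
/*
 * Illustrative sketch, not part of the kernel source: callers of
 * flush_space() walk the flush_state enum in order, trying progressively
 * heavier remedies until the reservation fits or COMMIT_TRANS has been
 * tried.  A self-contained toy model (try_reserve()/do_flush() are
 * hypothetical stand-ins):
 */
#include <stdbool.h>

enum toy_flush_state {
	T_FLUSH_DELAYED_ITEMS_NR = 1, T_FLUSH_DELAYED_ITEMS,
	T_FLUSH_DELALLOC, T_FLUSH_DELALLOC_WAIT,
	T_ALLOC_CHUNK, T_COMMIT_TRANS,
};

static unsigned long long toy_avail;	/* model of reclaimable space */

static bool try_reserve(unsigned long long need)
{
	return toy_avail >= need;
}

static void do_flush(int state)
{
	toy_avail += state * 1024ULL;	/* heavier states free more */
}

static bool reserve_with_escalation(unsigned long long need)
{
	int st;

	for (st = T_FLUSH_DELAYED_ITEMS_NR; st <= T_COMMIT_TRANS; st++) {
		if (try_reserve(need))
			return true;
		do_flush(st);		/* cheapest remedy first */
	}
	return try_reserve(need);
}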
4737
4738 static inline u64
4739 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4740                                  struct btrfs_space_info *space_info)
4741 {
4742         u64 used;
4743         u64 expected;
4744         u64 to_reclaim;
4745
4746         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4747                                 16 * 1024 * 1024);
4748         spin_lock(&space_info->lock);
4749         if (can_overcommit(root, space_info, to_reclaim,
4750                            BTRFS_RESERVE_FLUSH_ALL)) {
4751                 to_reclaim = 0;
4752                 goto out;
4753         }
4754
4755         used = space_info->bytes_used + space_info->bytes_reserved +
4756                space_info->bytes_pinned + space_info->bytes_readonly +
4757                space_info->bytes_may_use;
4758         if (can_overcommit(root, space_info, 1024 * 1024,
4759                            BTRFS_RESERVE_FLUSH_ALL))
4760                 expected = div_factor_fine(space_info->total_bytes, 95);
4761         else
4762                 expected = div_factor_fine(space_info->total_bytes, 90);
4763
4764         if (used > expected)
4765                 to_reclaim = used - expected;
4766         else
4767                 to_reclaim = 0;
4768         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4769                                      space_info->bytes_reserved);
4770 out:
4771         spin_unlock(&space_info->lock);
4772
4773         return to_reclaim;
4774 }
4775
4776 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4777                                         struct btrfs_fs_info *fs_info, u64 used)
4778 {
4779         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4780
4781         /* If we're just plain full then async reclaim just slows us down. */
4782         if (space_info->bytes_used >= thresh)
4783                 return 0;
4784
4785         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4786                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4787 }
4788
4789 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4790                                        struct btrfs_fs_info *fs_info,
4791                                        int flush_state)
4792 {
4793         u64 used;
4794
4795         spin_lock(&space_info->lock);
4796         /*
4797          * We've run out of space and have not gotten any free space via
4798          * flush_space, so don't bother doing async reclaim.
4799          */
4800         if (flush_state > COMMIT_TRANS && space_info->full) {
4801                 spin_unlock(&space_info->lock);
4802                 return 0;
4803         }
4804
4805         used = space_info->bytes_used + space_info->bytes_reserved +
4806                space_info->bytes_pinned + space_info->bytes_readonly +
4807                space_info->bytes_may_use;
4808         if (need_do_async_reclaim(space_info, fs_info, used)) {
4809                 spin_unlock(&space_info->lock);
4810                 return 1;
4811         }
4812         spin_unlock(&space_info->lock);
4813
4814         return 0;
4815 }
4816
4817 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4818 {
4819         struct btrfs_fs_info *fs_info;
4820         struct btrfs_space_info *space_info;
4821         u64 to_reclaim;
4822         int flush_state;
4823
4824         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4825         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4826
4827         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4828                                                       space_info);
4829         if (!to_reclaim)
4830                 return;
4831
4832         flush_state = FLUSH_DELAYED_ITEMS_NR;
4833         do {
4834                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4835                             to_reclaim, flush_state);
4836                 flush_state++;
4837                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4838                                                  flush_state))
4839                         return;
4840         } while (flush_state < COMMIT_TRANS);
4841 }
4842
4843 void btrfs_init_async_reclaim_work(struct work_struct *work)
4844 {
4845         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4846 }
4847
4848 /**
4849  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4850  * @root - the root we're allocating for
4851  * @block_rsv - the block_rsv we're allocating for
4852  * @orig_bytes - the number of bytes we want
4853  * @flush - whether or not we can flush to make our reservation
4854  *
4855  * This will reserve orig_bytes number of bytes from the space info associated
4856  * with the block_rsv.  If there is not enough space it will make an attempt to
4857  * flush out space to make room.  It will do this by flushing delalloc if
4858  * possible or committing the transaction.  If flush is 0 then no attempts to
4859  * regain reservations will be made and this will fail if there is not enough
4860  * space already.
4861  */
4862 static int reserve_metadata_bytes(struct btrfs_root *root,
4863                                   struct btrfs_block_rsv *block_rsv,
4864                                   u64 orig_bytes,
4865                                   enum btrfs_reserve_flush_enum flush)
4866 {
4867         struct btrfs_space_info *space_info = block_rsv->space_info;
4868         u64 used;
4869         u64 num_bytes = orig_bytes;
4870         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4871         int ret = 0;
4872         bool flushing = false;
4873
4874 again:
4875         ret = 0;
4876         spin_lock(&space_info->lock);
4877         /*
4878          * We only want to wait if somebody other than us is flushing and we
4879          * are actually allowed to flush all things.
4880          */
4881         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4882                space_info->flush) {
4883                 spin_unlock(&space_info->lock);
4884                 /*
4885                  * If we have a trans handle we can't wait because the flusher
4886                  * may have to commit the transaction, which would mean we would
4887                  * deadlock since we are waiting for the flusher to finish, but
4888                  * hold the current transaction open.
4889                  */
4890                 if (current->journal_info)
4891                         return -EAGAIN;
4892                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4893                 /* Must have been killed, return */
4894                 if (ret)
4895                         return -EINTR;
4896
4897                 spin_lock(&space_info->lock);
4898         }
4899
4900         ret = -ENOSPC;
4901         used = space_info->bytes_used + space_info->bytes_reserved +
4902                 space_info->bytes_pinned + space_info->bytes_readonly +
4903                 space_info->bytes_may_use;
4904
4905         /*
4906          * The idea here is that if we haven't already over-reserved the
4907          * space info, we can go ahead and save our reservation first and
4908          * then start flushing if we need to.  Otherwise, if we've already
4909          * overcommitted, let's start flushing stuff first and then come
4910          * back and try to make our reservation.
4911          */
4912         if (used <= space_info->total_bytes) {
4913                 if (used + orig_bytes <= space_info->total_bytes) {
4914                         space_info->bytes_may_use += orig_bytes;
4915                         trace_btrfs_space_reservation(root->fs_info,
4916                                 "space_info", space_info->flags, orig_bytes, 1);
4917                         ret = 0;
4918                 } else {
4919                         /*
4920                          * Ok, set num_bytes to orig_bytes since we aren't
4921                          * overcommitted; this way we only try to reclaim what
4922                          * we need.
4923                          */
4924                         num_bytes = orig_bytes;
4925                 }
4926         } else {
4927                 /*
4928                  * Ok, we're overcommitted; set num_bytes to the overcommitted
4929                  * amount plus the number of bytes that we need for this
4930                  * reservation.
4931                  */
4932                 num_bytes = used - space_info->total_bytes +
4933                         (orig_bytes * 2);
4934         }
4935
4936         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4937                 space_info->bytes_may_use += orig_bytes;
4938                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4939                                               space_info->flags, orig_bytes,
4940                                               1);
4941                 ret = 0;
4942         }
4943
4944         /*
4945          * Couldn't make our reservation; save our place so that while we're
4946          * trying to reclaim space we can actually use it instead of somebody
4947          * else stealing it from us.
4948          *
4949          * We make the other tasks wait for the flush only when we can flush
4950          * all things.
4951          */
4952         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4953                 flushing = true;
4954                 space_info->flush = 1;
4955         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4956                 used += orig_bytes;
4957                 /*
4958                  * We will do the space reservation dance during log replay,
4959                  * which means we won't have fs_info->fs_root set, so don't do
4960                  * the async reclaim as we will panic.
4961                  */
4962                 if (!root->fs_info->log_root_recovering &&
4963                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4964                     !work_busy(&root->fs_info->async_reclaim_work))
4965                         queue_work(system_unbound_wq,
4966                                    &root->fs_info->async_reclaim_work);
4967         }
4968         spin_unlock(&space_info->lock);
4969
4970         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4971                 goto out;
4972
4973         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4974                           flush_state);
4975         flush_state++;
4976
4977         /*
4978          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4979          * would happen. So skip the delalloc flush.
4980          */
4981         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4982             (flush_state == FLUSH_DELALLOC ||
4983              flush_state == FLUSH_DELALLOC_WAIT))
4984                 flush_state = ALLOC_CHUNK;
4985
4986         if (!ret)
4987                 goto again;
4988         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4989                  flush_state < COMMIT_TRANS)
4990                 goto again;
4991         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4992                  flush_state <= COMMIT_TRANS)
4993                 goto again;
4994
4995 out:
4996         if (ret == -ENOSPC &&
4997             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4998                 struct btrfs_block_rsv *global_rsv =
4999                         &root->fs_info->global_block_rsv;
5000
5001                 if (block_rsv != global_rsv &&
5002                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5003                         ret = 0;
5004         }
5005         if (ret == -ENOSPC)
5006                 trace_btrfs_space_reservation(root->fs_info,
5007                                               "space_info:enospc",
5008                                               space_info->flags, orig_bytes, 1);
5009         if (flushing) {
5010                 spin_lock(&space_info->lock);
5011                 space_info->flush = 0;
5012                 wake_up_all(&space_info->wait);
5013                 spin_unlock(&space_info->lock);
5014         }
5015         return ret;
5016 }
5017
5018 static struct btrfs_block_rsv *get_block_rsv(
5019                                         const struct btrfs_trans_handle *trans,
5020                                         const struct btrfs_root *root)
5021 {
5022         struct btrfs_block_rsv *block_rsv = NULL;
5023
5024         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5025             (root == root->fs_info->csum_root && trans->adding_csums) ||
5026              (root == root->fs_info->uuid_root))
5027                 block_rsv = trans->block_rsv;
5028
5029         if (!block_rsv)
5030                 block_rsv = root->block_rsv;
5031
5032         if (!block_rsv)
5033                 block_rsv = &root->fs_info->empty_block_rsv;
5034
5035         return block_rsv;
5036 }
5037
5038 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5039                                u64 num_bytes)
5040 {
5041         int ret = -ENOSPC;
5042         spin_lock(&block_rsv->lock);
5043         if (block_rsv->reserved >= num_bytes) {
5044                 block_rsv->reserved -= num_bytes;
5045                 if (block_rsv->reserved < block_rsv->size)
5046                         block_rsv->full = 0;
5047                 ret = 0;
5048         }
5049         spin_unlock(&block_rsv->lock);
5050         return ret;
5051 }
5052
5053 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5054                                 u64 num_bytes, int update_size)
5055 {
5056         spin_lock(&block_rsv->lock);
5057         block_rsv->reserved += num_bytes;
5058         if (update_size)
5059                 block_rsv->size += num_bytes;
5060         else if (block_rsv->reserved >= block_rsv->size)
5061                 block_rsv->full = 1;
5062         spin_unlock(&block_rsv->lock);
5063 }
5064
5065 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5066                              struct btrfs_block_rsv *dest, u64 num_bytes,
5067                              int min_factor)
5068 {
5069         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5070         u64 min_bytes;
5071
5072         if (global_rsv->space_info != dest->space_info)
5073                 return -ENOSPC;
5074
5075         spin_lock(&global_rsv->lock);
5076         min_bytes = div_factor(global_rsv->size, min_factor);
5077         if (global_rsv->reserved < min_bytes + num_bytes) {
5078                 spin_unlock(&global_rsv->lock);
5079                 return -ENOSPC;
5080         }
5081         global_rsv->reserved -= num_bytes;
5082         if (global_rsv->reserved < global_rsv->size)
5083                 global_rsv->full = 0;
5084         spin_unlock(&global_rsv->lock);
5085
5086         block_rsv_add_bytes(dest, num_bytes, 1);
5087         return 0;
5088 }
5089
5090 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5091                                     struct btrfs_block_rsv *block_rsv,
5092                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5093 {
5094         struct btrfs_space_info *space_info = block_rsv->space_info;
5095
5096         spin_lock(&block_rsv->lock);
5097         if (num_bytes == (u64)-1)
5098                 num_bytes = block_rsv->size;
5099         block_rsv->size -= num_bytes;
5100         if (block_rsv->reserved >= block_rsv->size) {
5101                 num_bytes = block_rsv->reserved - block_rsv->size;
5102                 block_rsv->reserved = block_rsv->size;
5103                 block_rsv->full = 1;
5104         } else {
5105                 num_bytes = 0;
5106         }
5107         spin_unlock(&block_rsv->lock);
5108
5109         if (num_bytes > 0) {
5110                 if (dest) {
5111                         spin_lock(&dest->lock);
5112                         if (!dest->full) {
5113                                 u64 bytes_to_add;
5114
5115                                 bytes_to_add = dest->size - dest->reserved;
5116                                 bytes_to_add = min(num_bytes, bytes_to_add);
5117                                 dest->reserved += bytes_to_add;
5118                                 if (dest->reserved >= dest->size)
5119                                         dest->full = 1;
5120                                 num_bytes -= bytes_to_add;
5121                         }
5122                         spin_unlock(&dest->lock);
5123                 }
5124                 if (num_bytes) {
5125                         spin_lock(&space_info->lock);
5126                         space_info->bytes_may_use -= num_bytes;
5127                         trace_btrfs_space_reservation(fs_info, "space_info",
5128                                         space_info->flags, num_bytes, 0);
5129                         spin_unlock(&space_info->lock);
5130                 }
5131         }
5132 }
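
/*
 * Illustrative sketch, not part of the kernel source: releasing from a
 * block reserve first shrinks ->size, then spills any excess ->reserved
 * into the destination reserve, and only the remainder is handed back to
 * the space_info's bytes_may_use.  Pure-accounting model:
 */
#include <stdint.h>

struct toy_rsv { uint64_t size, reserved; };

static uint64_t toy_rsv_release(struct toy_rsv *src, struct toy_rsv *dest,
				uint64_t bytes)
{
	uint64_t excess, moved = 0;

	src->size -= bytes;
	if (src->reserved <= src->size)
		return 0;			/* nothing freed up */
	excess = src->reserved - src->size;
	src->reserved = src->size;

	if (dest && dest->reserved < dest->size) {
		moved = dest->size - dest->reserved;
		if (moved > excess)
			moved = excess;
		dest->reserved += moved;
	}
	return excess - moved;	/* caller credits this to bytes_may_use */
}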
5133
5134 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5135                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5136 {
5137         int ret;
5138
5139         ret = block_rsv_use_bytes(src, num_bytes);
5140         if (ret)
5141                 return ret;
5142
5143         block_rsv_add_bytes(dst, num_bytes, 1);
5144         return 0;
5145 }
5146
5147 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5148 {
5149         memset(rsv, 0, sizeof(*rsv));
5150         spin_lock_init(&rsv->lock);
5151         rsv->type = type;
5152 }
5153
5154 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5155                                               unsigned short type)
5156 {
5157         struct btrfs_block_rsv *block_rsv;
5158         struct btrfs_fs_info *fs_info = root->fs_info;
5159
5160         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5161         if (!block_rsv)
5162                 return NULL;
5163
5164         btrfs_init_block_rsv(block_rsv, type);
5165         block_rsv->space_info = __find_space_info(fs_info,
5166                                                   BTRFS_BLOCK_GROUP_METADATA);
5167         return block_rsv;
5168 }
5169
5170 void btrfs_free_block_rsv(struct btrfs_root *root,
5171                           struct btrfs_block_rsv *rsv)
5172 {
5173         if (!rsv)
5174                 return;
5175         btrfs_block_rsv_release(root, rsv, (u64)-1);
5176         kfree(rsv);
5177 }
5178
5179 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5180 {
5181         kfree(rsv);
5182 }
5183
5184 int btrfs_block_rsv_add(struct btrfs_root *root,
5185                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5186                         enum btrfs_reserve_flush_enum flush)
5187 {
5188         int ret;
5189
5190         if (num_bytes == 0)
5191                 return 0;
5192
5193         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5194         if (!ret) {
5195                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5196                 return 0;
5197         }
5198
5199         return ret;
5200 }
5201
5202 int btrfs_block_rsv_check(struct btrfs_root *root,
5203                           struct btrfs_block_rsv *block_rsv, int min_factor)
5204 {
5205         u64 num_bytes = 0;
5206         int ret = -ENOSPC;
5207
5208         if (!block_rsv)
5209                 return 0;
5210
5211         spin_lock(&block_rsv->lock);
5212         num_bytes = div_factor(block_rsv->size, min_factor);
5213         if (block_rsv->reserved >= num_bytes)
5214                 ret = 0;
5215         spin_unlock(&block_rsv->lock);
5216
5217         return ret;
5218 }
5219
5220 int btrfs_block_rsv_refill(struct btrfs_root *root,
5221                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5222                            enum btrfs_reserve_flush_enum flush)
5223 {
5224         u64 num_bytes = 0;
5225         int ret = -ENOSPC;
5226
5227         if (!block_rsv)
5228                 return 0;
5229
5230         spin_lock(&block_rsv->lock);
5231         num_bytes = min_reserved;
5232         if (block_rsv->reserved >= num_bytes)
5233                 ret = 0;
5234         else
5235                 num_bytes -= block_rsv->reserved;
5236         spin_unlock(&block_rsv->lock);
5237
5238         if (!ret)
5239                 return 0;
5240
5241         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5242         if (!ret) {
5243                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5244                 return 0;
5245         }
5246
5247         return ret;
5248 }
5249
5250 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5251                             struct btrfs_block_rsv *dst_rsv,
5252                             u64 num_bytes)
5253 {
5254         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5255 }
5256
5257 void btrfs_block_rsv_release(struct btrfs_root *root,
5258                              struct btrfs_block_rsv *block_rsv,
5259                              u64 num_bytes)
5260 {
5261         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5262         if (global_rsv == block_rsv ||
5263             block_rsv->space_info != global_rsv->space_info)
5264                 global_rsv = NULL;
5265         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5266                                 num_bytes);
5267 }
5268
5269 /*
5270  * helper to calculate the size of the global block reservation.
5271  * the desired value is the sum of the space used by the extent tree,
5272  * checksum tree and root tree
5273  */
5274 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5275 {
5276         struct btrfs_space_info *sinfo;
5277         u64 num_bytes;
5278         u64 meta_used;
5279         u64 data_used;
5280         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5281
5282         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5283         spin_lock(&sinfo->lock);
5284         data_used = sinfo->bytes_used;
5285         spin_unlock(&sinfo->lock);
5286
5287         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5288         spin_lock(&sinfo->lock);
5289         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5290                 data_used = 0;
5291         meta_used = sinfo->bytes_used;
5292         spin_unlock(&sinfo->lock);
5293
5294         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5295                     csum_size * 2;
5296         num_bytes += div_u64(data_used + meta_used, 50);
5297
5298         if (num_bytes * 3 > meta_used)
5299                 num_bytes = div_u64(meta_used, 3);
5300
5301         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5302 }
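
/*
 * Illustrative sketch, not part of the kernel source: the sizing above
 * budgets two csum-tree copies per data block, plus 2% (1/50) of all used
 * space, capped at one third of the used metadata; the caller then aligns
 * it up and clamps it to 512M.  Worked as a standalone calculation:
 */
#include <stdint.h>

static uint64_t global_rsv_target(uint64_t data_used, uint64_t meta_used,
				  unsigned int blocksize_bits,
				  unsigned int csum_size)
{
	uint64_t num_bytes;

	num_bytes = (data_used >> blocksize_bits) * csum_size * 2;
	num_bytes += (data_used + meta_used) / 50;	/* plus 2% */

	if (num_bytes * 3 > meta_used)
		num_bytes = meta_used / 3;		/* cap at meta/3 */

	return num_bytes;
}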
5303
5304 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5305 {
5306         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5307         struct btrfs_space_info *sinfo = block_rsv->space_info;
5308         u64 num_bytes;
5309
5310         num_bytes = calc_global_metadata_size(fs_info);
5311
5312         spin_lock(&sinfo->lock);
5313         spin_lock(&block_rsv->lock);
5314
5315         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5316
5317         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5318                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5319                     sinfo->bytes_may_use;
5320
5321         if (sinfo->total_bytes > num_bytes) {
5322                 num_bytes = sinfo->total_bytes - num_bytes;
5323                 block_rsv->reserved += num_bytes;
5324                 sinfo->bytes_may_use += num_bytes;
5325                 trace_btrfs_space_reservation(fs_info, "space_info",
5326                                       sinfo->flags, num_bytes, 1);
5327         }
5328
5329         if (block_rsv->reserved >= block_rsv->size) {
5330                 num_bytes = block_rsv->reserved - block_rsv->size;
5331                 sinfo->bytes_may_use -= num_bytes;
5332                 trace_btrfs_space_reservation(fs_info, "space_info",
5333                                       sinfo->flags, num_bytes, 0);
5334                 block_rsv->reserved = block_rsv->size;
5335                 block_rsv->full = 1;
5336         }
5337
5338         spin_unlock(&block_rsv->lock);
5339         spin_unlock(&sinfo->lock);
5340 }
5341
5342 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5343 {
5344         struct btrfs_space_info *space_info;
5345
5346         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5347         fs_info->chunk_block_rsv.space_info = space_info;
5348
5349         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5350         fs_info->global_block_rsv.space_info = space_info;
5351         fs_info->delalloc_block_rsv.space_info = space_info;
5352         fs_info->trans_block_rsv.space_info = space_info;
5353         fs_info->empty_block_rsv.space_info = space_info;
5354         fs_info->delayed_block_rsv.space_info = space_info;
5355
5356         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5357         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5358         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5359         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5360         if (fs_info->quota_root)
5361                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5362         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5363
5364         update_global_block_rsv(fs_info);
5365 }
5366
5367 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5368 {
5369         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5370                                 (u64)-1);
5371         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5372         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5373         WARN_ON(fs_info->trans_block_rsv.size > 0);
5374         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5375         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5376         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5377         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5378         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5379 }
5380
5381 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5382                                   struct btrfs_root *root)
5383 {
5384         if (!trans->block_rsv)
5385                 return;
5386
5387         if (!trans->bytes_reserved)
5388                 return;
5389
5390         trace_btrfs_space_reservation(root->fs_info, "transaction",
5391                                       trans->transid, trans->bytes_reserved, 0);
5392         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5393         trans->bytes_reserved = 0;
5394 }
5395
5396 /*
5397  * To be called after all the new block groups attached to the transaction
5398  * handle have been created (btrfs_create_pending_block_groups()).
5399  */
5400 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5401 {
5402         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5403
5404         if (!trans->chunk_bytes_reserved)
5405                 return;
5406
5407         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5408
5409         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5410                                 trans->chunk_bytes_reserved);
5411         trans->chunk_bytes_reserved = 0;
5412 }
5413
5414 /* Can only return 0 or -ENOSPC */
5415 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5416                                   struct inode *inode)
5417 {
5418         struct btrfs_root *root = BTRFS_I(inode)->root;
5419         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5420         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5421
5422         /*
5423          * We need to hold space in order to delete our orphan item once we've
5424          * added it, so this takes the reservation and holds it until we are
5425          * truly done with the orphan item.
5426          */
5427         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5428         trace_btrfs_space_reservation(root->fs_info, "orphan",
5429                                       btrfs_ino(inode), num_bytes, 1);
5430         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5431 }
5432
5433 void btrfs_orphan_release_metadata(struct inode *inode)
5434 {
5435         struct btrfs_root *root = BTRFS_I(inode)->root;
5436         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5437         trace_btrfs_space_reservation(root->fs_info, "orphan",
5438                                       btrfs_ino(inode), num_bytes, 0);
5439         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5440 }
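
/*
 * Illustrative sketch, not part of the build: how the reserve/release pair
 * above is meant to bracket the life of an orphan item ('trans', 'inode'
 * and the error label are the caller's).  Assuming the 4.4-era
 * btrfs_calc_trans_metadata_size(), the single item reserved here costs
 * nodesize * BTRFS_MAX_LEVEL * 2, i.e. 256 KiB with 16 KiB nodes.
 */
#if 0
        ret = btrfs_orphan_reserve_metadata(trans, inode); /* 0 or -ENOSPC */
        if (ret)
                goto out;
        /* ... insert the orphan item and do the real work ... */
        btrfs_orphan_release_metadata(inode); /* orphan item is gone */
#endif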
5441
5442 /*
5443  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5444  * root: the root of the parent directory
5445  * rsv: block reservation
5446  * items: the number of items that we need to reserve
5447  * qgroup_reserved: used to return the reserved size in qgroup
5448  *
5449  * This function is used to reserve the space for snapshot/subvolume
5450  * creation and deletion. Those operations differ from the common
5451  * file/directory operations in that they modify two fs/file trees
5452  * and the root tree; the number of items that the qgroup reserves
5453  * is also different from the free space reservation. So we cannot
5454  * use the space reservation mechanism in start_transaction().
5455  */
5456 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5457                                      struct btrfs_block_rsv *rsv,
5458                                      int items,
5459                                      u64 *qgroup_reserved,
5460                                      bool use_global_rsv)
5461 {
5462         u64 num_bytes;
5463         int ret;
5464         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5465
5466         if (root->fs_info->quota_enabled) {
5467                 /* One for parent inode, two for dir entries */
5468                 num_bytes = 3 * root->nodesize;
5469                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5470                 if (ret)
5471                         return ret;
5472         } else {
5473                 num_bytes = 0;
5474         }
5475
5476         *qgroup_reserved = num_bytes;
5477
5478         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5479         rsv->space_info = __find_space_info(root->fs_info,
5480                                             BTRFS_BLOCK_GROUP_METADATA);
5481         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5482                                   BTRFS_RESERVE_FLUSH_ALL);
5483
5484         if (ret == -ENOSPC && use_global_rsv)
5485                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5486
5487         if (ret && *qgroup_reserved)
5488                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5489
5490         return ret;
5491 }
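
/*
 * Illustrative sketch, not part of the build, loosely modeled on subvolume
 * creation: reserve with a temporary rsv, do the work, then hand everything
 * back.  The item count 8 matches what the 4.4-era create_subvol() passes;
 * treat it as an example value.
 */
#if 0
        struct btrfs_block_rsv block_rsv;
        u64 qgroup_reserved;
        int ret;

        btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
        ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8,
                                               &qgroup_reserved, false);
        if (ret)
                return ret;
        /* ... create the subvolume items in a transaction ... */
        btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
#endif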
5492
5493 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5494                                       struct btrfs_block_rsv *rsv,
5495                                       u64 qgroup_reserved)
5496 {
5497         btrfs_block_rsv_release(root, rsv, (u64)-1);
5498 }
5499
5500 /**
5501  * drop_outstanding_extent - drop an outstanding extent
5502  * @inode: the inode we're dropping the extent for
5503  * @num_bytes: the number of bytes we're releasing.
5504  *
5505  * This is called when we are freeing up an outstanding extent, either called
5506  * after an error or after an extent is written.  This will return the number of
5507  * reserved extents that need to be freed.  This must be called with
5508  * BTRFS_I(inode)->lock held.
5509  */
5510 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5511 {
5512         unsigned drop_inode_space = 0;
5513         unsigned dropped_extents = 0;
5514         unsigned num_extents = 0;
5515
5516         num_extents = (unsigned)div64_u64(num_bytes +
5517                                           BTRFS_MAX_EXTENT_SIZE - 1,
5518                                           BTRFS_MAX_EXTENT_SIZE);
5519         ASSERT(num_extents);
5520         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5521         BTRFS_I(inode)->outstanding_extents -= num_extents;
5522
5523         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5524             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5525                                &BTRFS_I(inode)->runtime_flags))
5526                 drop_inode_space = 1;
5527
5528         /*
5529          * If we have at least as many outstanding extents as we have
5530          * reserved extents, we need to leave the reserved extents count alone.
5531          */
5532         if (BTRFS_I(inode)->outstanding_extents >=
5533             BTRFS_I(inode)->reserved_extents)
5534                 return drop_inode_space;
5535
5536         dropped_extents = BTRFS_I(inode)->reserved_extents -
5537                 BTRFS_I(inode)->outstanding_extents;
5538         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5539         return dropped_extents + drop_inode_space;
5540 }
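
/*
 * Worked example of the extent count above: it is a round-up division, so
 * with BTRFS_MAX_EXTENT_SIZE = 128 MiB, dropping a 300 MiB range accounts
 * for ceil(300 / 128) = 3 outstanding extents, while dropping exactly
 * 128 MiB accounts for 1.
 */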
5541
5542 /**
5543  * calc_csum_metadata_size - return the amount of metadata space that must be
5544  *      reserved/freed for the given bytes.
5545  * @inode: the inode we're manipulating
5546  * @num_bytes: the number of bytes in question
5547  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5548  *
5549  * This adjusts the number of csum_bytes in the inode and then returns the
5550  * correct amount of metadata that must either be reserved or freed.  We
5551  * calculate how many checksums we can fit into one leaf and then divide the
5552  * number of bytes that will need to be checksummed by this value to figure out
5553  * how many checksums will be required.  If we are adding bytes then the number
5554  * may go up and we will return the number of additional bytes that must be
5555  * reserved.  If it is going down we will return the number of bytes that must
5556  * be freed.
5557  *
5558  * This must be called with BTRFS_I(inode)->lock held.
5559  */
5560 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5561                                    int reserve)
5562 {
5563         struct btrfs_root *root = BTRFS_I(inode)->root;
5564         u64 old_csums, num_csums;
5565
5566         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5567             BTRFS_I(inode)->csum_bytes == 0)
5568                 return 0;
5569
5570         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5571         if (reserve)
5572                 BTRFS_I(inode)->csum_bytes += num_bytes;
5573         else
5574                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5575         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5576
5577         /* No change, no need to reserve more */
5578         if (old_csums == num_csums)
5579                 return 0;
5580
5581         if (reserve)
5582                 return btrfs_calc_trans_metadata_size(root,
5583                                                       num_csums - old_csums);
5584
5585         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5586 }
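
/*
 * Rough numbers for the math above (illustrative, assuming 4 KiB sectors,
 * 4-byte crc32c checksums and 16 KiB leaves): a leaf then holds on the
 * order of 4000 checksums, covering roughly 16 MiB of data, so csum_bytes
 * has to cross a ~16 MiB boundary before btrfs_csum_bytes_to_leaves()
 * changes and any metadata actually needs to be reserved or freed.
 */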
5587
5588 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5589 {
5590         struct btrfs_root *root = BTRFS_I(inode)->root;
5591         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5592         u64 to_reserve = 0;
5593         u64 csum_bytes;
5594         unsigned nr_extents = 0;
5595         int extra_reserve = 0;
5596         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5597         int ret = 0;
5598         bool delalloc_lock = true;
5599         u64 to_free = 0;
5600         unsigned dropped;
5601
5602         /* If we are a free space inode we must not flush, since we will be in
5603          * the middle of a transaction commit.  We also don't need the delalloc
5604          * mutex since we won't race with anybody.  We need this mostly to make
5605          * lockdep shut its filthy mouth.
5606          */
5607         if (btrfs_is_free_space_inode(inode)) {
5608                 flush = BTRFS_RESERVE_NO_FLUSH;
5609                 delalloc_lock = false;
5610         }
5611
5612         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5613             btrfs_transaction_in_commit(root->fs_info))
5614                 schedule_timeout(1);
5615
5616         if (delalloc_lock)
5617                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5618
5619         num_bytes = ALIGN(num_bytes, root->sectorsize);
5620
5621         spin_lock(&BTRFS_I(inode)->lock);
5622         nr_extents = (unsigned)div64_u64(num_bytes +
5623                                          BTRFS_MAX_EXTENT_SIZE - 1,
5624                                          BTRFS_MAX_EXTENT_SIZE);
5625         BTRFS_I(inode)->outstanding_extents += nr_extents;
5626         nr_extents = 0;
5627
5628         if (BTRFS_I(inode)->outstanding_extents >
5629             BTRFS_I(inode)->reserved_extents)
5630                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5631                         BTRFS_I(inode)->reserved_extents;
5632
5633         /*
5634          * Add an item to reserve for updating the inode when we complete the
5635          * delalloc io.
5636          */
5637         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5638                       &BTRFS_I(inode)->runtime_flags)) {
5639                 nr_extents++;
5640                 extra_reserve = 1;
5641         }
5642
5643         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5644         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5645         csum_bytes = BTRFS_I(inode)->csum_bytes;
5646         spin_unlock(&BTRFS_I(inode)->lock);
5647
5648         if (root->fs_info->quota_enabled) {
5649                 ret = btrfs_qgroup_reserve_meta(root,
5650                                 nr_extents * root->nodesize);
5651                 if (ret)
5652                         goto out_fail;
5653         }
5654
5655         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5656         if (unlikely(ret)) {
5657                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5658                 goto out_fail;
5659         }
5660
5661         spin_lock(&BTRFS_I(inode)->lock);
5662         if (extra_reserve) {
5663                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5664                         &BTRFS_I(inode)->runtime_flags);
5665                 nr_extents--;
5666         }
5667         BTRFS_I(inode)->reserved_extents += nr_extents;
5668         spin_unlock(&BTRFS_I(inode)->lock);
5669
5670         if (delalloc_lock)
5671                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5672
5673         if (to_reserve)
5674                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5675                                               btrfs_ino(inode), to_reserve, 1);
5676         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5677
5678         return 0;
5679
5680 out_fail:
5681         spin_lock(&BTRFS_I(inode)->lock);
5682         dropped = drop_outstanding_extent(inode, num_bytes);
5683         /*
5684          * If the inode's csum_bytes is the same as the original
5685          * csum_bytes then we know we haven't raced with any freers,
5686          * so we can just reduce the inode's csum bytes and carry on.
5687          */
5688         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5689                 calc_csum_metadata_size(inode, num_bytes, 0);
5690         } else {
5691                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5692                 u64 bytes;
5693
5694                 /*
5695                  * This is tricky, but first we need to figure out how much we
5696                  * freed by any freers that occurred during this
5697                  * reservation, so we reset ->csum_bytes to the csum_bytes
5698                  * before we dropped our lock, and then call the free for the
5699                  * number of bytes that were freed while we were trying our
5700                  * reservation.
5701                  */
5702                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5703                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5704                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5705
5707                 /*
5708                  * Now we need to see how much we would have freed had we not
5709                  * been making this reservation and our ->csum_bytes were not
5710                  * artificially inflated.
5711                  */
5712                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5713                 bytes = csum_bytes - orig_csum_bytes;
5714                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5715
5716                 /*
5717                  * Now reset ->csum_bytes to what it should be.  If bytes is
5718                  * more than to_free then we would have freed more space had we
5719                  * not had an artificially high ->csum_bytes, so we need to free
5720                  * the remainder.  If bytes is the same or less then we don't
5721                  * need to do anything; the other freers did the correct
5722                  * thing.
5723                  */
5724                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5725                 if (bytes > to_free)
5726                         to_free = bytes - to_free;
5727                 else
5728                         to_free = 0;
5729         }
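        /*
         * In short, with L(x) = metadata needed for x bytes of csums,
         * S = our snapshot of ->csum_bytes and f = bytes freed by others
         * while we were reserving: the first pass released
         * L(S) - L(S - f), computed at the inflated base, and the second
         * computed L(S - num_bytes) - L(S - num_bytes - f), what the
         * freers should have released at the correct base.  Freeing the
         * shortfall, if any, makes the net effect the same as if this
         * reservation had never happened.
         */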
5730         spin_unlock(&BTRFS_I(inode)->lock);
5731         if (dropped)
5732                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5733
5734         if (to_free) {
5735                 btrfs_block_rsv_release(root, block_rsv, to_free);
5736                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5737                                               btrfs_ino(inode), to_free, 0);
5738         }
5739         if (delalloc_lock)
5740                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5741         return ret;
5742 }
5743
5744 /**
5745  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5746  * @inode: the inode to release the reservation for
5747  * @num_bytes: the number of bytes we're releasing
5748  *
5749  * This will release the metadata reservation for an inode.  This can be called
5750  * once we complete IO for a given set of bytes to release their metadata
5751  * reservations.
5752  */
5753 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5754 {
5755         struct btrfs_root *root = BTRFS_I(inode)->root;
5756         u64 to_free = 0;
5757         unsigned dropped;
5758
5759         num_bytes = ALIGN(num_bytes, root->sectorsize);
5760         spin_lock(&BTRFS_I(inode)->lock);
5761         dropped = drop_outstanding_extent(inode, num_bytes);
5762
5763         if (num_bytes)
5764                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5765         spin_unlock(&BTRFS_I(inode)->lock);
5766         if (dropped > 0)
5767                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5768
5769         if (btrfs_test_is_dummy_root(root))
5770                 return;
5771
5772         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5773                                       btrfs_ino(inode), to_free, 0);
5774
5775         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5776                                 to_free);
5777 }
5778
5779 /**
5780  * btrfs_delalloc_reserve_space - reserve data and metadata space for
5781  * delalloc
5782  * @inode: inode we're writing to
5783  * @start: start range we are writing to
5784  * @len: the length of the range we are writing to
5785  *
5786  * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5787  *
5788  * This will do the following things
5789  *
5790  * o reserve space in data space info for num bytes
5791  *   and reserve the corresponding qgroup space
5792  *   (Done in check_data_free_space)
5793  *
5794  * o reserve space for metadata space, based on the number of outstanding
5795  *   extents and how much csums will be needed
5796  *   also reserve metadata space in a per root over-reserve method.
5797  * o add to the inode's ->delalloc_bytes
5798  * o add it to the fs_info's delalloc inodes list.
5799  *   (Above 3 all done in delalloc_reserve_metadata)
5800  *
5801  * Return 0 for success
5802  * Return <0 for error (-ENOSPC or -EDQUOT)
5803  */
5804 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5805 {
5806         int ret;
5807
5808         ret = btrfs_check_data_free_space(inode, start, len);
5809         if (ret < 0)
5810                 return ret;
5811         ret = btrfs_delalloc_reserve_metadata(inode, len);
5812         if (ret < 0)
5813                 btrfs_free_reserved_data_space(inode, start, len);
5814         return ret;
5815 }
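
/*
 * Illustrative sketch, not part of the build: the typical buffered-write
 * pairing for the helper above.  'pos', 'write_bytes' and 'copied' are
 * placeholders for the caller's state.
 */
#if 0
        ret = btrfs_delalloc_reserve_space(inode, pos, write_bytes);
        if (ret)
                return ret;
        /* ... copy user data into the page cache ... */
        if (copied < write_bytes)
                /* short copy: hand the unused tail back */
                btrfs_delalloc_release_space(inode, pos + copied,
                                             write_bytes - copied);
#endif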
5816
5817 /**
5818  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5819  * @inode: inode we're releasing space for
5820  * @start: start position of the space already reserved
5821  * @len: the len of the space already reserved
5822  *
5823  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5824  * called in the case that we don't need the metadata AND data reservations
5825  * anymore, e.g. if there is an error or we insert an inline extent.
5826  *
5827  * This function will release the metadata space that was not used and will
5828  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5829  * list if there are no delalloc bytes left.
5830  * It will also handle the qgroup reserved space.
5831  */
5832 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5833 {
5834         btrfs_delalloc_release_metadata(inode, len);
5835         btrfs_free_reserved_data_space(inode, start, len);
5836 }
5837
5838 static int update_block_group(struct btrfs_trans_handle *trans,
5839                               struct btrfs_root *root, u64 bytenr,
5840                               u64 num_bytes, int alloc)
5841 {
5842         struct btrfs_block_group_cache *cache = NULL;
5843         struct btrfs_fs_info *info = root->fs_info;
5844         u64 total = num_bytes;
5845         u64 old_val;
5846         u64 byte_in_group;
5847         int factor;
5848
5849         /* block accounting for super block */
5850         spin_lock(&info->delalloc_root_lock);
5851         old_val = btrfs_super_bytes_used(info->super_copy);
5852         if (alloc)
5853                 old_val += num_bytes;
5854         else
5855                 old_val -= num_bytes;
5856         btrfs_set_super_bytes_used(info->super_copy, old_val);
5857         spin_unlock(&info->delalloc_root_lock);
5858
5859         while (total) {
5860                 cache = btrfs_lookup_block_group(info, bytenr);
5861                 if (!cache)
5862                         return -ENOENT;
5863                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5864                                     BTRFS_BLOCK_GROUP_RAID1 |
5865                                     BTRFS_BLOCK_GROUP_RAID10))
5866                         factor = 2;
5867                 else
5868                         factor = 1;
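                /*
                 * 'factor' converts logical bytes into raw disk bytes for
                 * the disk_used updates below, e.g. freeing a 1 MiB extent
                 * from a RAID1 group drops bytes_used by 1 MiB but
                 * disk_used by 2 MiB, since two copies lived on disk.
                 */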
5869                 /*
5870                  * If this block group has free space cache written out, we
5871                  * need to make sure to load it if we are removing space.  This
5872                  * is because we need the unpinning stage to actually add the
5873                  * space back to the block group, otherwise we will leak space.
5874                  */
5875                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5876                         cache_block_group(cache, 1);
5877
5878                 byte_in_group = bytenr - cache->key.objectid;
5879                 WARN_ON(byte_in_group > cache->key.offset);
5880
5881                 spin_lock(&cache->space_info->lock);
5882                 spin_lock(&cache->lock);
5883
5884                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5885                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5886                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5887
5888                 old_val = btrfs_block_group_used(&cache->item);
5889                 num_bytes = min(total, cache->key.offset - byte_in_group);
5890                 if (alloc) {
5891                         old_val += num_bytes;
5892                         btrfs_set_block_group_used(&cache->item, old_val);
5893                         cache->reserved -= num_bytes;
5894                         cache->space_info->bytes_reserved -= num_bytes;
5895                         cache->space_info->bytes_used += num_bytes;
5896                         cache->space_info->disk_used += num_bytes * factor;
5897                         spin_unlock(&cache->lock);
5898                         spin_unlock(&cache->space_info->lock);
5899                 } else {
5900                         old_val -= num_bytes;
5901                         btrfs_set_block_group_used(&cache->item, old_val);
5902                         cache->pinned += num_bytes;
5903                         cache->space_info->bytes_pinned += num_bytes;
5904                         cache->space_info->bytes_used -= num_bytes;
5905                         cache->space_info->disk_used -= num_bytes * factor;
5906                         spin_unlock(&cache->lock);
5907                         spin_unlock(&cache->space_info->lock);
5908
5909                         set_extent_dirty(info->pinned_extents,
5910                                          bytenr, bytenr + num_bytes - 1,
5911                                          GFP_NOFS | __GFP_NOFAIL);
5912                         /*
5913                          * No longer have used bytes in this block group, queue
5914                          * it for deletion.
5915                          */
5916                         if (old_val == 0) {
5917                                 spin_lock(&info->unused_bgs_lock);
5918                                 if (list_empty(&cache->bg_list)) {
5919                                         btrfs_get_block_group(cache);
5920                                         list_add_tail(&cache->bg_list,
5921                                                       &info->unused_bgs);
5922                                 }
5923                                 spin_unlock(&info->unused_bgs_lock);
5924                         }
5925                 }
5926
5927                 spin_lock(&trans->transaction->dirty_bgs_lock);
5928                 if (list_empty(&cache->dirty_list)) {
5929                         list_add_tail(&cache->dirty_list,
5930                                       &trans->transaction->dirty_bgs);
5931                         trans->transaction->num_dirty_bgs++;
5932                         btrfs_get_block_group(cache);
5933                 }
5934                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5935
5936                 btrfs_put_block_group(cache);
5937                 total -= num_bytes;
5938                 bytenr += num_bytes;
5939         }
5940         return 0;
5941 }
5942
5943 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5944 {
5945         struct btrfs_block_group_cache *cache;
5946         u64 bytenr;
5947
5948         spin_lock(&root->fs_info->block_group_cache_lock);
5949         bytenr = root->fs_info->first_logical_byte;
5950         spin_unlock(&root->fs_info->block_group_cache_lock);
5951
5952         if (bytenr < (u64)-1)
5953                 return bytenr;
5954
5955         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5956         if (!cache)
5957                 return 0;
5958
5959         bytenr = cache->key.objectid;
5960         btrfs_put_block_group(cache);
5961
5962         return bytenr;
5963 }
5964
5965 static int pin_down_extent(struct btrfs_root *root,
5966                            struct btrfs_block_group_cache *cache,
5967                            u64 bytenr, u64 num_bytes, int reserved)
5968 {
5969         spin_lock(&cache->space_info->lock);
5970         spin_lock(&cache->lock);
5971         cache->pinned += num_bytes;
5972         cache->space_info->bytes_pinned += num_bytes;
5973         if (reserved) {
5974                 cache->reserved -= num_bytes;
5975                 cache->space_info->bytes_reserved -= num_bytes;
5976         }
5977         spin_unlock(&cache->lock);
5978         spin_unlock(&cache->space_info->lock);
5979
5980         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5981                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5982         if (reserved)
5983                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5984         return 0;
5985 }
5986
5987 /*
5988  * this function must be called within transaction
5989  */
5990 int btrfs_pin_extent(struct btrfs_root *root,
5991                      u64 bytenr, u64 num_bytes, int reserved)
5992 {
5993         struct btrfs_block_group_cache *cache;
5994
5995         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5996         BUG_ON(!cache); /* Logic error */
5997
5998         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5999
6000         btrfs_put_block_group(cache);
6001         return 0;
6002 }
6003
6004 /*
6005  * this function must be called within transaction
6006  */
6007 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6008                                     u64 bytenr, u64 num_bytes)
6009 {
6010         struct btrfs_block_group_cache *cache;
6011         int ret;
6012
6013         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6014         if (!cache)
6015                 return -EINVAL;
6016
6017         /*
6018          * pull in the free space cache (if any) so that our pin
6019          * removes the free space from the cache.  We have load_only set
6020          * to one because the slow code to read in the free extents does check
6021          * the pinned extents.
6022          */
6023         cache_block_group(cache, 1);
6024
6025         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6026
6027         /* remove us from the free space cache (if we're there at all) */
6028         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6029         btrfs_put_block_group(cache);
6030         return ret;
6031 }
6032
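/*
 * Three cases are handled below, relative to how far the caching thread
 * has gotten: if the block group is fully cached we delete the range from
 * the free space cache directly; if the caching thread has not reached it
 * yet we record it as excluded so the thread skips it; if the range
 * straddles caching_ctl->progress we split it and do both.
 */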
6033 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6034 {
6035         int ret;
6036         struct btrfs_block_group_cache *block_group;
6037         struct btrfs_caching_control *caching_ctl;
6038
6039         block_group = btrfs_lookup_block_group(root->fs_info, start);
6040         if (!block_group)
6041                 return -EINVAL;
6042
6043         cache_block_group(block_group, 0);
6044         caching_ctl = get_caching_control(block_group);
6045
6046         if (!caching_ctl) {
6047                 /* Logic error */
6048                 BUG_ON(!block_group_cache_done(block_group));
6049                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6050         } else {
6051                 mutex_lock(&caching_ctl->mutex);
6052
6053                 if (start >= caching_ctl->progress) {
6054                         ret = add_excluded_extent(root, start, num_bytes);
6055                 } else if (start + num_bytes <= caching_ctl->progress) {
6056                         ret = btrfs_remove_free_space(block_group,
6057                                                       start, num_bytes);
6058                 } else {
6059                         num_bytes = caching_ctl->progress - start;
6060                         ret = btrfs_remove_free_space(block_group,
6061                                                       start, num_bytes);
6062                         if (ret)
6063                                 goto out_lock;
6064
6065                         num_bytes = (start + num_bytes) -
6066                                 caching_ctl->progress;
6067                         start = caching_ctl->progress;
6068                         ret = add_excluded_extent(root, start, num_bytes);
6069                 }
6070 out_lock:
6071                 mutex_unlock(&caching_ctl->mutex);
6072                 put_caching_control(caching_ctl);
6073         }
6074         btrfs_put_block_group(block_group);
6075         return ret;
6076 }
6077
6078 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6079                                  struct extent_buffer *eb)
6080 {
6081         struct btrfs_file_extent_item *item;
6082         struct btrfs_key key;
6083         int found_type;
6084         int i;
6085
6086         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6087                 return 0;
6088
6089         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6090                 btrfs_item_key_to_cpu(eb, &key, i);
6091                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6092                         continue;
6093                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6094                 found_type = btrfs_file_extent_type(eb, item);
6095                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6096                         continue;
6097                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6098                         continue;
6099                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6100                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6101                 __exclude_logged_extent(log, key.objectid, key.offset);
6102         }
6103
6104         return 0;
6105 }
6106
6107 /**
6108  * btrfs_update_reserved_bytes - update the block_group and space info counters
6109  * @cache:      The cache we are manipulating
6110  * @num_bytes:  The number of bytes in question
6111  * @reserve:    One of the reservation enums
6112  * @delalloc:   Whether the blocks are allocated for a delalloc write
6113  *
6114  * This is called by the allocator when it reserves space, or by somebody who is
6115  * freeing space that was never actually used on disk.  For example if you
6116  * reserve some space for a new leaf in transaction A and before transaction A
6117  * commits you free that leaf, you call this with reserve set to 0 in order to
6118  * clear the reservation.
6119  *
6120  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6121  * ENOSPC accounting.  For data we handle the reservation through clearing the
6122  * delalloc bits in the io_tree.  We have to do this since we could end up
6123  * allocating less disk space for the amount of data we have reserved in the
6124  * case of compression.
6125  *
6126  * If this is a reservation and the block group has become read only we cannot
6127  * make the reservation and return -EAGAIN, otherwise this function always
6128  * succeeds.
6129  */
6130 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6131                                        u64 num_bytes, int reserve, int delalloc)
6132 {
6133         struct btrfs_space_info *space_info = cache->space_info;
6134         int ret = 0;
6135
6136         spin_lock(&space_info->lock);
6137         spin_lock(&cache->lock);
6138         if (reserve != RESERVE_FREE) {
6139                 if (cache->ro) {
6140                         ret = -EAGAIN;
6141                 } else {
6142                         cache->reserved += num_bytes;
6143                         space_info->bytes_reserved += num_bytes;
6144                         if (reserve == RESERVE_ALLOC) {
6145                                 trace_btrfs_space_reservation(cache->fs_info,
6146                                                 "space_info", space_info->flags,
6147                                                 num_bytes, 0);
6148                                 space_info->bytes_may_use -= num_bytes;
6149                         }
6150
6151                         if (delalloc)
6152                                 cache->delalloc_bytes += num_bytes;
6153                 }
6154         } else {
6155                 if (cache->ro)
6156                         space_info->bytes_readonly += num_bytes;
6157                 cache->reserved -= num_bytes;
6158                 space_info->bytes_reserved -= num_bytes;
6159
6160                 if (delalloc)
6161                         cache->delalloc_bytes -= num_bytes;
6162         }
6163         spin_unlock(&cache->lock);
6164         spin_unlock(&space_info->lock);
6165         return ret;
6166 }
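
/*
 * Illustrative sketch, not part of the build: how the allocator pairs the
 * two directions above.  'block_group', 'num_bytes' and 'is_delalloc' are
 * placeholders.
 */
#if 0
        ret = btrfs_update_reserved_bytes(block_group, num_bytes,
                                          RESERVE_ALLOC, is_delalloc);
        if (ret == -EAGAIN) {
                /* the group went read-only under us; pick another one */
        }
        /* ... and if the extent is abandoned before ever hitting disk: */
        btrfs_update_reserved_bytes(block_group, num_bytes,
                                    RESERVE_FREE, is_delalloc);
#endif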
6167
6168 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6169                                 struct btrfs_root *root)
6170 {
6171         struct btrfs_fs_info *fs_info = root->fs_info;
6172         struct btrfs_caching_control *next;
6173         struct btrfs_caching_control *caching_ctl;
6174         struct btrfs_block_group_cache *cache;
6175
6176         down_write(&fs_info->commit_root_sem);
6177
6178         list_for_each_entry_safe(caching_ctl, next,
6179                                  &fs_info->caching_block_groups, list) {
6180                 cache = caching_ctl->block_group;
6181                 if (block_group_cache_done(cache)) {
6182                         cache->last_byte_to_unpin = (u64)-1;
6183                         list_del_init(&caching_ctl->list);
6184                         put_caching_control(caching_ctl);
6185                 } else {
6186                         cache->last_byte_to_unpin = caching_ctl->progress;
6187                 }
6188         }
6189
6190         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6191                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6192         else
6193                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6194
6195         up_write(&fs_info->commit_root_sem);
6196
6197         update_global_block_rsv(fs_info);
6198 }
6199
6200 /*
6201  * Returns the free cluster for the given space info and sets empty_cluster to
6202  * what it should be based on the mount options.
6203  */
6204 static struct btrfs_free_cluster *
6205 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6206                    u64 *empty_cluster)
6207 {
6208         struct btrfs_free_cluster *ret = NULL;
6209         bool ssd = btrfs_test_opt(root, SSD);
6210
6211         *empty_cluster = 0;
6212         if (btrfs_mixed_space_info(space_info))
6213                 return ret;
6214
6215         if (ssd)
6216                 *empty_cluster = 2 * 1024 * 1024;
6217         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6218                 ret = &root->fs_info->meta_alloc_cluster;
6219                 if (!ssd)
6220                         *empty_cluster = 64 * 1024;
6221         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6222                 ret = &root->fs_info->data_alloc_cluster;
6223         }
6224
6225         return ret;
6226 }
6227
6228 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6229                               const bool return_free_space)
6230 {
6231         struct btrfs_fs_info *fs_info = root->fs_info;
6232         struct btrfs_block_group_cache *cache = NULL;
6233         struct btrfs_space_info *space_info;
6234         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6235         struct btrfs_free_cluster *cluster = NULL;
6236         u64 len;
6237         u64 total_unpinned = 0;
6238         u64 empty_cluster = 0;
6239         bool readonly;
6240
6241         while (start <= end) {
6242                 readonly = false;
6243                 if (!cache ||
6244                     start >= cache->key.objectid + cache->key.offset) {
6245                         if (cache)
6246                                 btrfs_put_block_group(cache);
6247                         total_unpinned = 0;
6248                         cache = btrfs_lookup_block_group(fs_info, start);
6249                         BUG_ON(!cache); /* Logic error */
6250
6251                         cluster = fetch_cluster_info(root,
6252                                                      cache->space_info,
6253                                                      &empty_cluster);
6254                         empty_cluster <<= 1;
6255                 }
6256
6257                 len = cache->key.objectid + cache->key.offset - start;
6258                 len = min(len, end + 1 - start);
6259
6260                 if (start < cache->last_byte_to_unpin) {
6261                         len = min(len, cache->last_byte_to_unpin - start);
6262                         if (return_free_space)
6263                                 btrfs_add_free_space(cache, start, len);
6264                 }
6265
6266                 start += len;
6267                 total_unpinned += len;
6268                 space_info = cache->space_info;
6269
6270                 /*
6271                  * If this space cluster has been marked as fragmented and we've
6272                  * unpinned enough in this block group to potentially allow a
6273                  * cluster to be created inside of it, go ahead and clear the
6274                  * fragmented check.
6275                  */
6276                 if (cluster && cluster->fragmented &&
6277                     total_unpinned > empty_cluster) {
6278                         spin_lock(&cluster->lock);
6279                         cluster->fragmented = 0;
6280                         spin_unlock(&cluster->lock);
6281                 }
6282
6283                 spin_lock(&space_info->lock);
6284                 spin_lock(&cache->lock);
6285                 cache->pinned -= len;
6286                 space_info->bytes_pinned -= len;
6287                 space_info->max_extent_size = 0;
6288                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6289                 if (cache->ro) {
6290                         space_info->bytes_readonly += len;
6291                         readonly = true;
6292                 }
6293                 spin_unlock(&cache->lock);
6294                 if (!readonly && global_rsv->space_info == space_info) {
6295                         spin_lock(&global_rsv->lock);
6296                         if (!global_rsv->full) {
6297                                 len = min(len, global_rsv->size -
6298                                           global_rsv->reserved);
6299                                 global_rsv->reserved += len;
6300                                 space_info->bytes_may_use += len;
6301                                 if (global_rsv->reserved >= global_rsv->size)
6302                                         global_rsv->full = 1;
6303                         }
6304                         spin_unlock(&global_rsv->lock);
6305                 }
6306                 spin_unlock(&space_info->lock);
6307         }
6308
6309         if (cache)
6310                 btrfs_put_block_group(cache);
6311         return 0;
6312 }
6313
6314 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6315                                struct btrfs_root *root)
6316 {
6317         struct btrfs_fs_info *fs_info = root->fs_info;
6318         struct btrfs_block_group_cache *block_group, *tmp;
6319         struct list_head *deleted_bgs;
6320         struct extent_io_tree *unpin;
6321         u64 start;
6322         u64 end;
6323         int ret;
6324
6325         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6326                 unpin = &fs_info->freed_extents[1];
6327         else
6328                 unpin = &fs_info->freed_extents[0];
6329
6330         while (!trans->aborted) {
6331                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6332                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6333                                             EXTENT_DIRTY, NULL);
6334                 if (ret) {
6335                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6336                         break;
6337                 }
6338
6339                 if (btrfs_test_opt(root, DISCARD))
6340                         ret = btrfs_discard_extent(root, start,
6341                                                    end + 1 - start, NULL);
6342
6343                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6344                 unpin_extent_range(root, start, end, true);
6345                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6346                 cond_resched();
6347         }
6348
6349         /*
6350          * Transaction is finished.  We don't need the lock anymore.  We
6351          * do need to clean up the block groups in case of a transaction
6352          * abort.
6353          */
6354         deleted_bgs = &trans->transaction->deleted_bgs;
6355         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6356                 u64 trimmed = 0;
6357
6358                 ret = -EROFS;
6359                 if (!trans->aborted)
6360                         ret = btrfs_discard_extent(root,
6361                                                    block_group->key.objectid,
6362                                                    block_group->key.offset,
6363                                                    &trimmed);
6364
6365                 list_del_init(&block_group->bg_list);
6366                 btrfs_put_block_group_trimming(block_group);
6367                 btrfs_put_block_group(block_group);
6368
6369                 if (ret) {
6370                         const char *errstr = btrfs_decode_error(ret);
6371                         btrfs_warn(fs_info,
6372                                    "discard failed while removing block group: errno=%d %s",
6373                                    ret, errstr);
6374                 }
6375         }
6376
6377         return 0;
6378 }
6379
6380 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6381                              u64 owner, u64 root_objectid)
6382 {
6383         struct btrfs_space_info *space_info;
6384         u64 flags;
6385
6386         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6387                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6388                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6389                 else
6390                         flags = BTRFS_BLOCK_GROUP_METADATA;
6391         } else {
6392                 flags = BTRFS_BLOCK_GROUP_DATA;
6393         }
6394
6395         space_info = __find_space_info(fs_info, flags);
6396         BUG_ON(!space_info); /* Logic bug */
6397         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6398 }
6399
6400
6401 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6402                                 struct btrfs_root *root,
6403                                 struct btrfs_delayed_ref_node *node, u64 parent,
6404                                 u64 root_objectid, u64 owner_objectid,
6405                                 u64 owner_offset, int refs_to_drop,
6406                                 struct btrfs_delayed_extent_op *extent_op)
6407 {
6408         struct btrfs_key key;
6409         struct btrfs_path *path;
6410         struct btrfs_fs_info *info = root->fs_info;
6411         struct btrfs_root *extent_root = info->extent_root;
6412         struct extent_buffer *leaf;
6413         struct btrfs_extent_item *ei;
6414         struct btrfs_extent_inline_ref *iref;
6415         int ret;
6416         int is_data;
6417         int extent_slot = 0;
6418         int found_extent = 0;
6419         int num_to_del = 1;
6420         int no_quota = node->no_quota;
6421         u32 item_size;
6422         u64 refs;
6423         u64 bytenr = node->bytenr;
6424         u64 num_bytes = node->num_bytes;
6425         int last_ref = 0;
6426         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6427                                                  SKINNY_METADATA);
6428
6429         if (!info->quota_enabled || !is_fstree(root_objectid))
6430                 no_quota = 1;
6431
6432         path = btrfs_alloc_path();
6433         if (!path)
6434                 return -ENOMEM;
6435
6436         path->reada = 1;
6437         path->leave_spinning = 1;
6438
6439         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6440         BUG_ON(!is_data && refs_to_drop != 1);
6441
6442         if (is_data)
6443                 skinny_metadata = 0;
6444
6445         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6446                                     bytenr, num_bytes, parent,
6447                                     root_objectid, owner_objectid,
6448                                     owner_offset);
6449         if (ret == 0) {
6450                 extent_slot = path->slots[0];
6451                 while (extent_slot >= 0) {
6452                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6453                                               extent_slot);
6454                         if (key.objectid != bytenr)
6455                                 break;
6456                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6457                             key.offset == num_bytes) {
6458                                 found_extent = 1;
6459                                 break;
6460                         }
6461                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6462                             key.offset == owner_objectid) {
6463                                 found_extent = 1;
6464                                 break;
6465                         }
6466                         if (path->slots[0] - extent_slot > 5)
6467                                 break;
6468                         extent_slot--;
6469                 }
6470 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6471                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6472                 if (found_extent && item_size < sizeof(*ei))
6473                         found_extent = 0;
6474 #endif
6475                 if (!found_extent) {
6476                         BUG_ON(iref);
6477                         ret = remove_extent_backref(trans, extent_root, path,
6478                                                     NULL, refs_to_drop,
6479                                                     is_data, &last_ref);
6480                         if (ret) {
6481                                 btrfs_abort_transaction(trans, extent_root, ret);
6482                                 goto out;
6483                         }
6484                         btrfs_release_path(path);
6485                         path->leave_spinning = 1;
6486
6487                         key.objectid = bytenr;
6488                         key.type = BTRFS_EXTENT_ITEM_KEY;
6489                         key.offset = num_bytes;
6490
6491                         if (!is_data && skinny_metadata) {
6492                                 key.type = BTRFS_METADATA_ITEM_KEY;
6493                                 key.offset = owner_objectid;
6494                         }
6495
6496                         ret = btrfs_search_slot(trans, extent_root,
6497                                                 &key, path, -1, 1);
6498                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6499                                 /*
6500                                  * Couldn't find our skinny metadata item,
6501                                  * see if we have ye olde extent item.
6502                                  */
6503                                 path->slots[0]--;
6504                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6505                                                       path->slots[0]);
6506                                 if (key.objectid == bytenr &&
6507                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6508                                     key.offset == num_bytes)
6509                                         ret = 0;
6510                         }
6511
6512                         if (ret > 0 && skinny_metadata) {
6513                                 skinny_metadata = false;
6514                                 key.objectid = bytenr;
6515                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6516                                 key.offset = num_bytes;
6517                                 btrfs_release_path(path);
6518                                 ret = btrfs_search_slot(trans, extent_root,
6519                                                         &key, path, -1, 1);
6520                         }
6521
6522                         if (ret) {
6523                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6524                                         ret, bytenr);
6525                                 if (ret > 0)
6526                                         btrfs_print_leaf(extent_root,
6527                                                          path->nodes[0]);
6528                         }
6529                         if (ret < 0) {
6530                                 btrfs_abort_transaction(trans, extent_root, ret);
6531                                 goto out;
6532                         }
6533                         extent_slot = path->slots[0];
6534                 }
6535         } else if (WARN_ON(ret == -ENOENT)) {
6536                 btrfs_print_leaf(extent_root, path->nodes[0]);
6537                 btrfs_err(info,
6538                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6539                         bytenr, parent, root_objectid, owner_objectid,
6540                         owner_offset);
6541                 btrfs_abort_transaction(trans, extent_root, ret);
6542                 goto out;
6543         } else {
6544                 btrfs_abort_transaction(trans, extent_root, ret);
6545                 goto out;
6546         }
6547
6548         leaf = path->nodes[0];
6549         item_size = btrfs_item_size_nr(leaf, extent_slot);
6550 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6551         if (item_size < sizeof(*ei)) {
6552                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6553                 ret = convert_extent_item_v0(trans, extent_root, path,
6554                                              owner_objectid, 0);
6555                 if (ret < 0) {
6556                         btrfs_abort_transaction(trans, extent_root, ret);
6557                         goto out;
6558                 }
6559
6560                 btrfs_release_path(path);
6561                 path->leave_spinning = 1;
6562
6563                 key.objectid = bytenr;
6564                 key.type = BTRFS_EXTENT_ITEM_KEY;
6565                 key.offset = num_bytes;
6566
6567                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6568                                         -1, 1);
6569                 if (ret) {
6570                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6571                                 ret, bytenr);
6572                         btrfs_print_leaf(extent_root, path->nodes[0]);
6573                 }
6574                 if (ret < 0) {
6575                         btrfs_abort_transaction(trans, extent_root, ret);
6576                         goto out;
6577                 }
6578
6579                 extent_slot = path->slots[0];
6580                 leaf = path->nodes[0];
6581                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6582         }
6583 #endif
6584         BUG_ON(item_size < sizeof(*ei));
6585         ei = btrfs_item_ptr(leaf, extent_slot,
6586                             struct btrfs_extent_item);
6587         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6588             key.type == BTRFS_EXTENT_ITEM_KEY) {
6589                 struct btrfs_tree_block_info *bi;
6590                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6591                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6592                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6593         }
6594
6595         refs = btrfs_extent_refs(leaf, ei);
6596         if (refs < refs_to_drop) {
6597                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6598                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6599                 ret = -EINVAL;
6600                 btrfs_abort_transaction(trans, extent_root, ret);
6601                 goto out;
6602         }
6603         refs -= refs_to_drop;
6604
6605         if (refs > 0) {
6606                 if (extent_op)
6607                         __run_delayed_extent_op(extent_op, leaf, ei);
6608                 /*
6609                  * In the case of inline back ref, reference count will
6610                  * be updated by remove_extent_backref
6611                  */
6612                 if (iref) {
6613                         BUG_ON(!found_extent);
6614                 } else {
6615                         btrfs_set_extent_refs(leaf, ei, refs);
6616                         btrfs_mark_buffer_dirty(leaf);
6617                 }
6618                 if (found_extent) {
6619                         ret = remove_extent_backref(trans, extent_root, path,
6620                                                     iref, refs_to_drop,
6621                                                     is_data, &last_ref);
6622                         if (ret) {
6623                                 btrfs_abort_transaction(trans, extent_root, ret);
6624                                 goto out;
6625                         }
6626                 }
6627                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6628                                  root_objectid);
6629         } else {
6630                 if (found_extent) {
6631                         BUG_ON(is_data && refs_to_drop !=
6632                                extent_data_ref_count(path, iref));
6633                         if (iref) {
6634                                 BUG_ON(path->slots[0] != extent_slot);
6635                         } else {
6636                                 BUG_ON(path->slots[0] != extent_slot + 1);
6637                                 path->slots[0] = extent_slot;
6638                                 num_to_del = 2;
6639                         }
6640                 }
6641
6642                 last_ref = 1;
6643                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6644                                       num_to_del);
6645                 if (ret) {
6646                         btrfs_abort_transaction(trans, extent_root, ret);
6647                         goto out;
6648                 }
6649                 btrfs_release_path(path);
6650
6651                 if (is_data) {
6652                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6653                         if (ret) {
6654                                 btrfs_abort_transaction(trans, extent_root, ret);
6655                                 goto out;
6656                         }
6657                 }
6658
6659                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6660                 if (ret) {
6661                         btrfs_abort_transaction(trans, extent_root, ret);
6662                         goto out;
6663                 }
6664         }
6665         btrfs_release_path(path);
6666
6667 out:
6668         btrfs_free_path(path);
6669         return ret;
6670 }
6671
6672 /*
6673  * when we free a block, it is possible (and likely) that we free the last
6674  * delayed ref for that extent as well.  This searches the delayed ref tree for
6675  * a given extent, and if there are no other delayed refs to be processed, it
6676  * removes it from the tree.
6677  */
6678 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6679                                       struct btrfs_root *root, u64 bytenr)
6680 {
6681         struct btrfs_delayed_ref_head *head;
6682         struct btrfs_delayed_ref_root *delayed_refs;
6683         int ret = 0;
6684
6685         delayed_refs = &trans->transaction->delayed_refs;
6686         spin_lock(&delayed_refs->lock);
6687         head = btrfs_find_delayed_ref_head(trans, bytenr);
6688         if (!head)
6689                 goto out_delayed_unlock;
6690
6691         spin_lock(&head->lock);
6692         if (!list_empty(&head->ref_list))
6693                 goto out;
6694
6695         if (head->extent_op) {
6696                 if (!head->must_insert_reserved)
6697                         goto out;
6698                 btrfs_free_delayed_extent_op(head->extent_op);
6699                 head->extent_op = NULL;
6700         }
6701
6702         /*
6703          * waiting for the lock here would deadlock.  If someone else has it
6704          * locked they are already in the process of dropping it anyway
6705          */
6706         if (!mutex_trylock(&head->mutex))
6707                 goto out;
6708
6709         /*
6710          * at this point we have a head with no other entries.  Go
6711          * ahead and process it.
6712          */
6713         head->node.in_tree = 0;
6714         rb_erase(&head->href_node, &delayed_refs->href_root);
6715
6716         atomic_dec(&delayed_refs->num_entries);
6717
6718         /*
6719          * we don't take a ref on the node because we're removing it from the
6720          * tree, so we just steal the ref the tree was holding.
6721          */
6722         delayed_refs->num_heads--;
6723         if (head->processing == 0)
6724                 delayed_refs->num_heads_ready--;
6725         head->processing = 0;
6726         spin_unlock(&head->lock);
6727         spin_unlock(&delayed_refs->lock);
6728
6729         BUG_ON(head->extent_op);
6730         if (head->must_insert_reserved)
6731                 ret = 1;
6732
6733         mutex_unlock(&head->mutex);
6734         btrfs_put_delayed_ref(&head->node);
6735         return ret;
6736 out:
6737         spin_unlock(&head->lock);
6738
6739 out_delayed_unlock:
6740         spin_unlock(&delayed_refs->lock);
6741         return 0;
6742 }
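
/*
 * Illustrative caller sketch (editorial, not part of the original file):
 * check_ref_cleanup() returns 1 only when it removed the last delayed ref
 * head for a block that was reserved but never written into the extent
 * tree, in which case the caller may free it immediately; otherwise it
 * returns 0 and the block must go through normal delayed-ref processing
 * (and typically gets pinned):
 *
 *	ret = check_ref_cleanup(trans, root, buf->start);
 *	if (!ret)
 *		goto out;	// pin; delayed refs still own this block
 *	// ret == 1: hand the space straight back to the free space cache
 */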
6743
6744 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6745                            struct btrfs_root *root,
6746                            struct extent_buffer *buf,
6747                            u64 parent, int last_ref)
6748 {
6749         int pin = 1;
6750         int ret;
6751
6752         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6753                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6754                                         buf->start, buf->len,
6755                                         parent, root->root_key.objectid,
6756                                         btrfs_header_level(buf),
6757                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6758                 BUG_ON(ret); /* -ENOMEM */
6759         }
6760
6761         if (!last_ref)
6762                 return;
6763
6764         if (btrfs_header_generation(buf) == trans->transid) {
6765                 struct btrfs_block_group_cache *cache;
6766
6767                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6768                         ret = check_ref_cleanup(trans, root, buf->start);
6769                         if (!ret)
6770                                 goto out;
6771                 }
6772
6773                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6774
6775                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6776                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6777                         btrfs_put_block_group(cache);
6778                         goto out;
6779                 }
6780
6781                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6782
6783                 btrfs_add_free_space(cache, buf->start, buf->len);
6784                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6785                 btrfs_put_block_group(cache);
6786                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6787                 pin = 0;
6788         }
6789 out:
6790         if (pin)
6791                 add_pinned_bytes(root->fs_info, buf->len,
6792                                  btrfs_header_level(buf),
6793                                  root->root_key.objectid);
6794
6795         /*
6796          * We are deleting the buffer, so clear the corrupt flag since it no
6797          * longer matters.
6798          */
6799         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6800 }
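
/*
 * Editorial summary of the pin-vs-free decision above (a sketch, not
 * original source commentary):
 *
 *	header generation	WRITTEN flag	result
 *	== trans->transid	clear		freed to the free space cache
 *	== trans->transid	set		pinned until commit
 *	!= trans->transid	any		pinned via the delayed ref
 *
 * A block that made it to disk may still be referenced by a committed
 * tree, so its space can only be reused after the transaction commits.
 */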
6801
6802 /* Can return -ENOMEM */
6803 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6804                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6805                       u64 owner, u64 offset, int no_quota)
6806 {
6807         int ret;
6808         struct btrfs_fs_info *fs_info = root->fs_info;
6809
6810         if (btrfs_test_is_dummy_root(root))
6811                 return 0;
6812
6813         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6814
6815         /*
6816          * tree log blocks never actually go into the extent allocation
6817          * tree, just update pinning info and exit early.
6818          */
6819         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6820                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6821                 /* unlocks the pinned mutex */
6822                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6823                 ret = 0;
6824         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6825                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6826                                         num_bytes,
6827                                         parent, root_objectid, (int)owner,
6828                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6829         } else {
6830                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6831                                                 num_bytes,
6832                                                 parent, root_objectid, owner,
6833                                                 offset, BTRFS_DROP_DELAYED_REF,
6834                                                 NULL, no_quota);
6835         }
6836         return ret;
6837 }
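
/*
 * Hypothetical usage sketch (editorial; the concrete values are made up):
 * dropping one reference to a data extent held by inode 257 at file
 * offset 0 in the FS tree.  Because owner >= BTRFS_FIRST_FREE_OBJECTID,
 * this takes the btrfs_add_delayed_data_ref() branch above:
 *
 *	ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 *				0,			// parent: not shared
 *				BTRFS_FS_TREE_OBJECTID,	// root holding the ref
 *				257,			// owner: inode number
 *				0,			// file extent offset
 *				0);			// no_quota
 */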
6838
6839 /*
6840  * when we wait for progress in the block group caching, it's because
6841  * our allocation attempt failed at least once.  So, we must sleep
6842  * and let some progress happen before we try again.
6843  *
6844  * This function will sleep at least once waiting for new free space to
6845  * show up, and then it will check the block group free space numbers
6846  * for our min num_bytes.  Another option is to have it go ahead
6847  * and look in the rbtree for a free extent of a given size, but this
6848  * is a good start.
6849  *
6850  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6851  * any of the information in this block group.
6852  */
6853 static noinline void
6854 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6855                                 u64 num_bytes)
6856 {
6857         struct btrfs_caching_control *caching_ctl;
6858
6859         caching_ctl = get_caching_control(cache);
6860         if (!caching_ctl)
6861                 return;
6862
6863         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6864                    (cache->free_space_ctl->free_space >= num_bytes));
6865
6866         put_caching_control(caching_ctl);
6867 }
6868
6869 static noinline int
6870 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6871 {
6872         struct btrfs_caching_control *caching_ctl;
6873         int ret = 0;
6874
6875         caching_ctl = get_caching_control(cache);
6876         if (!caching_ctl)
6877                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6878
6879         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6880         if (cache->cached == BTRFS_CACHE_ERROR)
6881                 ret = -EIO;
6882         put_caching_control(caching_ctl);
6883         return ret;
6884 }
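
/*
 * Editorial note: the two waiters above differ in their wake condition.
 * wait_block_group_cache_progress() returns as soon as free_space reaches
 * the requested size (or caching finishes), so its callers must check
 * cache->cached for BTRFS_CACHE_ERROR themselves;
 * wait_block_group_cache_done() always waits for caching to finish and
 * folds the error check into its return value.
 */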
6885
6886 int __get_raid_index(u64 flags)
6887 {
6888         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6889                 return BTRFS_RAID_RAID10;
6890         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6891                 return BTRFS_RAID_RAID1;
6892         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6893                 return BTRFS_RAID_DUP;
6894         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6895                 return BTRFS_RAID_RAID0;
6896         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6897                 return BTRFS_RAID_RAID5;
6898         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6899                 return BTRFS_RAID_RAID6;
6900
6901         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6902 }
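
/*
 * Illustrative examples (editorial): the profile bits are tested in a
 * fixed order and the type bits (DATA/METADATA/SYSTEM) are ignored, so:
 *
 *	__get_raid_index(BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
 *		== BTRFS_RAID_RAID1
 *	__get_raid_index(BTRFS_BLOCK_GROUP_METADATA)
 *		== BTRFS_RAID_SINGLE	// no profile bit set
 */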
6903
6904 int get_block_group_index(struct btrfs_block_group_cache *cache)
6905 {
6906         return __get_raid_index(cache->flags);
6907 }
6908
6909 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6910         [BTRFS_RAID_RAID10]     = "raid10",
6911         [BTRFS_RAID_RAID1]      = "raid1",
6912         [BTRFS_RAID_DUP]        = "dup",
6913         [BTRFS_RAID_RAID0]      = "raid0",
6914         [BTRFS_RAID_SINGLE]     = "single",
6915         [BTRFS_RAID_RAID5]      = "raid5",
6916         [BTRFS_RAID_RAID6]      = "raid6",
6917 };
6918
6919 static const char *get_raid_name(enum btrfs_raid_types type)
6920 {
6921         if (type >= BTRFS_NR_RAID_TYPES)
6922                 return NULL;
6923
6924         return btrfs_raid_type_names[type];
6925 }
6926
6927 enum btrfs_loop_type {
6928         LOOP_CACHING_NOWAIT = 0,
6929         LOOP_CACHING_WAIT = 1,
6930         LOOP_ALLOC_CHUNK = 2,
6931         LOOP_NO_EMPTY_SIZE = 3,
6932 };
6933
6934 static inline void
6935 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6936                        int delalloc)
6937 {
6938         if (delalloc)
6939                 down_read(&cache->data_rwsem);
6940 }
6941
6942 static inline void
6943 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6944                        int delalloc)
6945 {
6946         btrfs_get_block_group(cache);
6947         if (delalloc)
6948                 down_read(&cache->data_rwsem);
6949 }
6950
6951 static struct btrfs_block_group_cache *
6952 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6953                    struct btrfs_free_cluster *cluster,
6954                    int delalloc)
6955 {
6956         struct btrfs_block_group_cache *used_bg;
6957         bool locked = false;
6958 again:
6959         spin_lock(&cluster->refill_lock);
6960         if (locked) {
6961                 if (used_bg == cluster->block_group)
6962                         return used_bg;
6963
6964                 up_read(&used_bg->data_rwsem);
6965                 btrfs_put_block_group(used_bg);
6966         }
6967
6968         used_bg = cluster->block_group;
6969         if (!used_bg)
6970                 return NULL;
6971
6972         if (used_bg == block_group)
6973                 return used_bg;
6974
6975         btrfs_get_block_group(used_bg);
6976
6977         if (!delalloc)
6978                 return used_bg;
6979
6980         if (down_read_trylock(&used_bg->data_rwsem))
6981                 return used_bg;
6982
6983         spin_unlock(&cluster->refill_lock);
6984         down_read(&used_bg->data_rwsem);
6985         locked = true;
6986         goto again;
6987 }
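
/*
 * Editorial note on the retry above: the lock order is refill_lock, then
 * data_rwsem.  When the trylock fails we must drop refill_lock before
 * blocking on data_rwsem, and once both are held again the cluster may
 * point at a different block group; the "locked" recheck at the top of
 * the loop drops the now-stale reference before starting over.
 */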
6988
6989 static inline void
6990 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6991                          int delalloc)
6992 {
6993         if (delalloc)
6994                 up_read(&cache->data_rwsem);
6995         btrfs_put_block_group(cache);
6996 }
6997
6998 /*
6999  * walks the btree of allocated extents and finds a hole of a given size.
7000  * The key ins is changed to record the hole:
7001  * ins->objectid == start position
7002  * ins->type == BTRFS_EXTENT_ITEM_KEY
7003  * ins->offset == the size of the hole.
7004  * Any available blocks before search_start are skipped.
7005  *
7006  * If there is no suitable free space, we record the max size of the
7007  * free space extent currently available.
7008  */
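
/*
 * Illustrative caller sketch (editorial; SZ_1M and the flags value are
 * placeholders):
 *
 *	struct btrfs_key ins = {0};
 *	ret = find_free_extent(root, SZ_1M, 0, 0, &ins, flags, 0);
 *	if (ret == 0) {
 *		// ins.objectid is the start, ins.offset the reserved length
 *	} else if (ret == -ENOSPC) {
 *		// ins.offset holds the largest free extent seen; callers
 *		// like btrfs_reserve_extent() retry with a smaller size
 *	}
 */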
7009 static noinline int find_free_extent(struct btrfs_root *orig_root,
7010                                      u64 num_bytes, u64 empty_size,
7011                                      u64 hint_byte, struct btrfs_key *ins,
7012                                      u64 flags, int delalloc)
7013 {
7014         int ret = 0;
7015         struct btrfs_root *root = orig_root->fs_info->extent_root;
7016         struct btrfs_free_cluster *last_ptr = NULL;
7017         struct btrfs_block_group_cache *block_group = NULL;
7018         u64 search_start = 0;
7019         u64 max_extent_size = 0;
7020         u64 empty_cluster = 0;
7021         struct btrfs_space_info *space_info;
7022         int loop = 0;
7023         int index = __get_raid_index(flags);
7024         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7025                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7026         bool failed_cluster_refill = false;
7027         bool failed_alloc = false;
7028         bool use_cluster = true;
7029         bool have_caching_bg = false;
7030         bool full_search = false;
7031
7032         WARN_ON(num_bytes < root->sectorsize);
7033         ins->type = BTRFS_EXTENT_ITEM_KEY;
7034         ins->objectid = 0;
7035         ins->offset = 0;
7036
7037         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7038
7039         space_info = __find_space_info(root->fs_info, flags);
7040         if (!space_info) {
7041                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7042                 return -ENOSPC;
7043         }
7044
7045         /*
7046          * If our free space is heavily fragmented we may not be able to make
7047          * big contiguous allocations, so instead of doing the expensive search
7048          * for free space, simply return ENOSPC with our max_extent_size so we
7049          * can go ahead and search for a more manageable chunk.
7050          *
7051          * If our max_extent_size is large enough for our allocation simply
7052          * disable clustering since we will likely not be able to find enough
7053          * space to create a cluster and induce latency trying.
7054          */
7055         if (unlikely(space_info->max_extent_size)) {
7056                 spin_lock(&space_info->lock);
7057                 if (space_info->max_extent_size &&
7058                     num_bytes > space_info->max_extent_size) {
7059                         ins->offset = space_info->max_extent_size;
7060                         spin_unlock(&space_info->lock);
7061                         return -ENOSPC;
7062                 } else if (space_info->max_extent_size) {
7063                         use_cluster = false;
7064                 }
7065                 spin_unlock(&space_info->lock);
7066         }
7067
7068         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7069         if (last_ptr) {
7070                 spin_lock(&last_ptr->lock);
7071                 if (last_ptr->block_group)
7072                         hint_byte = last_ptr->window_start;
7073                 if (last_ptr->fragmented) {
7074                         /*
7075                          * We still set window_start so we can keep track of the
7076                          * last place we found an allocation to try and save
7077                          * some time.
7078                          */
7079                         hint_byte = last_ptr->window_start;
7080                         use_cluster = false;
7081                 }
7082                 spin_unlock(&last_ptr->lock);
7083         }
7084
7085         search_start = max(search_start, first_logical_byte(root, 0));
7086         search_start = max(search_start, hint_byte);
7087         if (search_start == hint_byte) {
7088                 block_group = btrfs_lookup_block_group(root->fs_info,
7089                                                        search_start);
7090                 /*
7091                  * we don't want to use the block group if it doesn't match our
7092  * allocation bits, or if it's not cached.
7093                  *
7094                  * However if we are re-searching with an ideal block group
7095                  * picked out then we don't care that the block group is cached.
7096                  */
7097                 if (block_group && block_group_bits(block_group, flags) &&
7098                     block_group->cached != BTRFS_CACHE_NO) {
7099                         down_read(&space_info->groups_sem);
7100                         if (list_empty(&block_group->list) ||
7101                             block_group->ro) {
7102                                 /*
7103                                  * someone is removing this block group,
7104                                  * we can't jump into the have_block_group
7105                                  * target because our list pointers are not
7106                                  * valid
7107                                  */
7108                                 btrfs_put_block_group(block_group);
7109                                 up_read(&space_info->groups_sem);
7110                         } else {
7111                                 index = get_block_group_index(block_group);
7112                                 btrfs_lock_block_group(block_group, delalloc);
7113                                 goto have_block_group;
7114                         }
7115                 } else if (block_group) {
7116                         btrfs_put_block_group(block_group);
7117                 }
7118         }
7119 search:
7120         have_caching_bg = false;
7121         if (index == 0 || index == __get_raid_index(flags))
7122                 full_search = true;
7123         down_read(&space_info->groups_sem);
7124         list_for_each_entry(block_group, &space_info->block_groups[index],
7125                             list) {
7126                 u64 offset;
7127                 int cached;
7128
7129                 btrfs_grab_block_group(block_group, delalloc);
7130                 search_start = block_group->key.objectid;
7131
7132                 /*
7133                  * this can happen if we end up cycling through all the
7134                  * raid types, but we want to make sure we only allocate
7135                  * for the proper type.
7136                  */
7137                 if (!block_group_bits(block_group, flags)) {
7138                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7139                                     BTRFS_BLOCK_GROUP_RAID1 |
7140                                     BTRFS_BLOCK_GROUP_RAID5 |
7141                                     BTRFS_BLOCK_GROUP_RAID6 |
7142                                     BTRFS_BLOCK_GROUP_RAID10;
7143
7144                         /*
7145                          * if they asked for extra copies and this block group
7146                          * doesn't provide them, bail.  This does allow us to
7147                          * fill raid0 from raid1.
7148                          */
7149                         if ((flags & extra) && !(block_group->flags & extra))
7150                                 goto loop;
7151                 }
7152
7153 have_block_group:
7154                 cached = block_group_cache_done(block_group);
7155                 if (unlikely(!cached)) {
7156                         have_caching_bg = true;
7157                         ret = cache_block_group(block_group, 0);
7158                         BUG_ON(ret < 0);
7159                         ret = 0;
7160                 }
7161
7162                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7163                         goto loop;
7164                 if (unlikely(block_group->ro))
7165                         goto loop;
7166
7167                 /*
7168                  * Ok, we want to try to use the cluster allocator, so
7169                  * let's look there
7170                  */
7171                 if (last_ptr && use_cluster) {
7172                         struct btrfs_block_group_cache *used_block_group;
7173                         unsigned long aligned_cluster;
7174                         /*
7175                          * the refill lock keeps out other
7176                          * people trying to start a new cluster
7177                          */
7178                         used_block_group = btrfs_lock_cluster(block_group,
7179                                                               last_ptr,
7180                                                               delalloc);
7181                         if (!used_block_group)
7182                                 goto refill_cluster;
7183
7184                         if (used_block_group != block_group &&
7185                             (used_block_group->ro ||
7186                              !block_group_bits(used_block_group, flags)))
7187                                 goto release_cluster;
7188
7189                         offset = btrfs_alloc_from_cluster(used_block_group,
7190                                                 last_ptr,
7191                                                 num_bytes,
7192                                                 used_block_group->key.objectid,
7193                                                 &max_extent_size);
7194                         if (offset) {
7195                                 /* we have a block, we're done */
7196                                 spin_unlock(&last_ptr->refill_lock);
7197                                 trace_btrfs_reserve_extent_cluster(root,
7198                                                 used_block_group,
7199                                                 search_start, num_bytes);
7200                                 if (used_block_group != block_group) {
7201                                         btrfs_release_block_group(block_group,
7202                                                                   delalloc);
7203                                         block_group = used_block_group;
7204                                 }
7205                                 goto checks;
7206                         }
7207
7208                         WARN_ON(last_ptr->block_group != used_block_group);
7209 release_cluster:
7210                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7211                          * set up a new cluster, so let's just skip it
7212                          * and let the allocator find whatever block
7213                          * it can find.  If we reach this point, we
7214                          * will have tried the cluster allocator
7215                          * plenty of times and not have found
7216                          * anything, so we are likely way too
7217                          * fragmented for the clustering stuff to find
7218                          * anything.
7219                          *
7220                          * However, if the cluster is taken from the
7221                          * current block group, release the cluster
7222                          * first, so that we stand a better chance of
7223                          * succeeding in the unclustered
7224                          * allocation.  */
7225                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7226                             used_block_group != block_group) {
7227                                 spin_unlock(&last_ptr->refill_lock);
7228                                 btrfs_release_block_group(used_block_group,
7229                                                           delalloc);
7230                                 goto unclustered_alloc;
7231                         }
7232
7233                         /*
7234                          * this cluster didn't work out, free it and
7235                          * start over
7236                          */
7237                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7238
7239                         if (used_block_group != block_group)
7240                                 btrfs_release_block_group(used_block_group,
7241                                                           delalloc);
7242 refill_cluster:
7243                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7244                                 spin_unlock(&last_ptr->refill_lock);
7245                                 goto unclustered_alloc;
7246                         }
7247
7248                         aligned_cluster = max_t(unsigned long,
7249                                                 empty_cluster + empty_size,
7250                                                 block_group->full_stripe_len);
7251
7252                         /* allocate a cluster in this block group */
7253                         ret = btrfs_find_space_cluster(root, block_group,
7254                                                        last_ptr, search_start,
7255                                                        num_bytes,
7256                                                        aligned_cluster);
7257                         if (ret == 0) {
7258                                 /*
7259                                  * now pull our allocation out of this
7260                                  * cluster
7261                                  */
7262                                 offset = btrfs_alloc_from_cluster(block_group,
7263                                                         last_ptr,
7264                                                         num_bytes,
7265                                                         search_start,
7266                                                         &max_extent_size);
7267                                 if (offset) {
7268                                         /* we found one, proceed */
7269                                         spin_unlock(&last_ptr->refill_lock);
7270                                         trace_btrfs_reserve_extent_cluster(root,
7271                                                 block_group, search_start,
7272                                                 num_bytes);
7273                                         goto checks;
7274                                 }
7275                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7276                                    && !failed_cluster_refill) {
7277                                 spin_unlock(&last_ptr->refill_lock);
7278
7279                                 failed_cluster_refill = true;
7280                                 wait_block_group_cache_progress(block_group,
7281                                        num_bytes + empty_cluster + empty_size);
7282                                 goto have_block_group;
7283                         }
7284
7285                         /*
7286                          * at this point we either didn't find a cluster
7287                          * or we weren't able to allocate a block from our
7288                          * cluster.  Free the cluster we've been trying
7289                          * to use, and go to the next block group
7290                          */
7291                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7292                         spin_unlock(&last_ptr->refill_lock);
7293                         goto loop;
7294                 }
7295
7296 unclustered_alloc:
7297                 /*
7298                  * We are doing an unclustered alloc, set the fragmented flag so
7299                  * we don't bother trying to set up a cluster again until we get
7300                  * more space.
7301                  */
7302                 if (unlikely(last_ptr)) {
7303                         spin_lock(&last_ptr->lock);
7304                         last_ptr->fragmented = 1;
7305                         spin_unlock(&last_ptr->lock);
7306                 }
7307                 spin_lock(&block_group->free_space_ctl->tree_lock);
7308                 if (cached &&
7309                     block_group->free_space_ctl->free_space <
7310                     num_bytes + empty_cluster + empty_size) {
7311                         if (block_group->free_space_ctl->free_space >
7312                             max_extent_size)
7313                                 max_extent_size =
7314                                         block_group->free_space_ctl->free_space;
7315                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7316                         goto loop;
7317                 }
7318                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7319
7320                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7321                                                     num_bytes, empty_size,
7322                                                     &max_extent_size);
7323                 /*
7324                  * If we didn't find a chunk, and we haven't failed on this
7325                  * block group before, and this block group is in the middle of
7326                  * caching and we are ok with waiting, then go ahead and wait
7327                  * for progress to be made, and set failed_alloc to true.
7328                  *
7329                  * If failed_alloc is true then we've already waited on this
7330                  * block group once and should move on to the next block group.
7331                  */
7332                 if (!offset && !failed_alloc && !cached &&
7333                     loop > LOOP_CACHING_NOWAIT) {
7334                         wait_block_group_cache_progress(block_group,
7335                                                 num_bytes + empty_size);
7336                         failed_alloc = true;
7337                         goto have_block_group;
7338                 } else if (!offset) {
7339                         goto loop;
7340                 }
7341 checks:
7342                 search_start = ALIGN(offset, root->stripesize);
7343
7344                 /* move on to the next group */
7345                 if (search_start + num_bytes >
7346                     block_group->key.objectid + block_group->key.offset) {
7347                         btrfs_add_free_space(block_group, offset, num_bytes);
7348                         goto loop;
7349                 }
7350
7351                 if (offset < search_start)
7352                         btrfs_add_free_space(block_group, offset,
7353                                              search_start - offset);
7354                 BUG_ON(offset > search_start);
7355
7356                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7357                                                   alloc_type, delalloc);
7358                 if (ret == -EAGAIN) {
7359                         btrfs_add_free_space(block_group, offset, num_bytes);
7360                         goto loop;
7361                 }
7362
7363                 /* we are all good, let's return */
7364                 ins->objectid = search_start;
7365                 ins->offset = num_bytes;
7366
7367                 trace_btrfs_reserve_extent(orig_root, block_group,
7368                                            search_start, num_bytes);
7369                 btrfs_release_block_group(block_group, delalloc);
7370                 break;
7371 loop:
7372                 failed_cluster_refill = false;
7373                 failed_alloc = false;
7374                 BUG_ON(index != get_block_group_index(block_group));
7375                 btrfs_release_block_group(block_group, delalloc);
7376         }
7377         up_read(&space_info->groups_sem);
7378
7379         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7380                 goto search;
7381
7382         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7383                 goto search;
7384
7385         /*
7386          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7387          *                      caching kthreads as we move along
7388          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7389          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7390          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7391          *                      again
7392          */
7393         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7394                 index = 0;
7395                 if (loop == LOOP_CACHING_NOWAIT) {
7396                         /*
7397                          * We want to skip the LOOP_CACHING_WAIT step if we
7398                          * don't have any uncached bgs and we've already done a
7399                          * full search through.
7400                          */
7401                         if (have_caching_bg || !full_search)
7402                                 loop = LOOP_CACHING_WAIT;
7403                         else
7404                                 loop = LOOP_ALLOC_CHUNK;
7405                 } else {
7406                         loop++;
7407                 }
7408
7409                 if (loop == LOOP_ALLOC_CHUNK) {
7410                         struct btrfs_trans_handle *trans;
7411                         int exist = 0;
7412
7413                         trans = current->journal_info;
7414                         if (trans)
7415                                 exist = 1;
7416                         else
7417                                 trans = btrfs_join_transaction(root);
7418
7419                         if (IS_ERR(trans)) {
7420                                 ret = PTR_ERR(trans);
7421                                 goto out;
7422                         }
7423
7424                         ret = do_chunk_alloc(trans, root, flags,
7425                                              CHUNK_ALLOC_FORCE);
7426
7427                         /*
7428                          * If we can't allocate a new chunk we've already looped
7429                          * through at least once, move on to the NO_EMPTY_SIZE
7430                          * case.
7431                          */
7432                         if (ret == -ENOSPC)
7433                                 loop = LOOP_NO_EMPTY_SIZE;
7434
7435                         /*
7436                          * Do not bail out on ENOSPC since we
7437                          * can do more things.
7438                          */
7439                         if (ret < 0 && ret != -ENOSPC)
7440                                 btrfs_abort_transaction(trans,
7441                                                         root, ret);
7442                         else
7443                                 ret = 0;
7444                         if (!exist)
7445                                 btrfs_end_transaction(trans, root);
7446                         if (ret)
7447                                 goto out;
7448                 }
7449
7450                 if (loop == LOOP_NO_EMPTY_SIZE) {
7451                         /*
7452                          * Don't loop again if we already have no empty_size and
7453                          * no empty_cluster.
7454                          */
7455                         if (empty_size == 0 &&
7456                             empty_cluster == 0) {
7457                                 ret = -ENOSPC;
7458                                 goto out;
7459                         }
7460                         empty_size = 0;
7461                         empty_cluster = 0;
7462                 }
7463
7464                 goto search;
7465         } else if (!ins->objectid) {
7466                 ret = -ENOSPC;
7467         } else if (ins->objectid) {
7468                 if (!use_cluster && last_ptr) {
7469                         spin_lock(&last_ptr->lock);
7470                         last_ptr->window_start = ins->objectid;
7471                         spin_unlock(&last_ptr->lock);
7472                 }
7473                 ret = 0;
7474         }
7475 out:
7476         if (ret == -ENOSPC) {
7477                 spin_lock(&space_info->lock);
7478                 space_info->max_extent_size = max_extent_size;
7479                 spin_unlock(&space_info->lock);
7480                 ins->offset = max_extent_size;
7481         }
7482         return ret;
7483 }
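
/*
 * Editorial note: the -ENOSPC path above publishes max_extent_size in the
 * space_info, which is exactly what the "heavily fragmented" fast path at
 * the top of find_free_extent() consumes.  A later allocation larger than
 * that value can then fail fast without rescanning every block group.
 */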
7484
7485 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7486                             int dump_block_groups)
7487 {
7488         struct btrfs_block_group_cache *cache;
7489         int index = 0;
7490
7491         spin_lock(&info->lock);
7492         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7493                info->flags,
7494                info->total_bytes - info->bytes_used - info->bytes_pinned -
7495                info->bytes_reserved - info->bytes_readonly,
7496                (info->full) ? "" : "not ");
7497         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7498                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7499                info->total_bytes, info->bytes_used, info->bytes_pinned,
7500                info->bytes_reserved, info->bytes_may_use,
7501                info->bytes_readonly);
7502         spin_unlock(&info->lock);
7503
7504         if (!dump_block_groups)
7505                 return;
7506
7507         down_read(&info->groups_sem);
7508 again:
7509         list_for_each_entry(cache, &info->block_groups[index], list) {
7510                 spin_lock(&cache->lock);
7511                 printk(KERN_INFO "BTRFS: "
7512                            "block group %llu has %llu bytes, "
7513                            "%llu used %llu pinned %llu reserved %s\n",
7514                        cache->key.objectid, cache->key.offset,
7515                        btrfs_block_group_used(&cache->item), cache->pinned,
7516                        cache->reserved, cache->ro ? "[readonly]" : "");
7517                 btrfs_dump_free_space(cache, bytes);
7518                 spin_unlock(&cache->lock);
7519         }
7520         if (++index < BTRFS_NR_RAID_TYPES)
7521                 goto again;
7522         up_read(&info->groups_sem);
7523 }
7524
7525 int btrfs_reserve_extent(struct btrfs_root *root,
7526                          u64 num_bytes, u64 min_alloc_size,
7527                          u64 empty_size, u64 hint_byte,
7528                          struct btrfs_key *ins, int is_data, int delalloc)
7529 {
7530         bool final_tried = num_bytes == min_alloc_size;
7531         u64 flags;
7532         int ret;
7533
7534         flags = btrfs_get_alloc_profile(root, is_data);
7535 again:
7536         WARN_ON(num_bytes < root->sectorsize);
7537         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7538                                flags, delalloc);
7539
7540         if (ret == -ENOSPC) {
7541                 if (!final_tried && ins->offset) {
7542                         num_bytes = min(num_bytes >> 1, ins->offset);
7543                         num_bytes = round_down(num_bytes, root->sectorsize);
7544                         num_bytes = max(num_bytes, min_alloc_size);
7545                         if (num_bytes == min_alloc_size)
7546                                 final_tried = true;
7547                         goto again;
7548                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7549                         struct btrfs_space_info *sinfo;
7550
7551                         sinfo = __find_space_info(root->fs_info, flags);
7552                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7553                                 flags, num_bytes);
7554                         if (sinfo)
7555                                 dump_space_info(sinfo, num_bytes, 1);
7556                 }
7557         }
7558
7559         return ret;
7560 }
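
/*
 * Worked example of the back-off above (editorial, hypothetical numbers):
 * sectorsize 4K, num_bytes = 1M, min_alloc_size = 64K, and a failed
 * search that reported ins->offset = 300K:
 *
 *	min(1M >> 1, 300K)	= 300K
 *	round_down(300K, 4K)	= 300K
 *	max(300K, 64K)		= 300K	-> retry with 300K
 *
 * num_bytes keeps shrinking toward min_alloc_size; once they are equal,
 * final_tried is set and the next -ENOSPC is returned to the caller.
 */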
7561
7562 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7563                                         u64 start, u64 len,
7564                                         int pin, int delalloc)
7565 {
7566         struct btrfs_block_group_cache *cache;
7567         int ret = 0;
7568
7569         cache = btrfs_lookup_block_group(root->fs_info, start);
7570         if (!cache) {
7571                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7572                         start);
7573                 return -ENOSPC;
7574         }
7575
7576         if (pin)
7577                 pin_down_extent(root, cache, start, len, 1);
7578         else {
7579                 if (btrfs_test_opt(root, DISCARD))
7580                         ret = btrfs_discard_extent(root, start, len, NULL);
7581                 btrfs_add_free_space(cache, start, len);
7582                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7583         }
7584
7585         btrfs_put_block_group(cache);
7586
7587         trace_btrfs_reserved_extent_free(root, start, len);
7588
7589         return ret;
7590 }
7591
7592 int btrfs_free_reserved_extent(struct btrfs_root *root,
7593                                u64 start, u64 len, int delalloc)
7594 {
7595         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7596 }
7597
7598 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7599                                        u64 start, u64 len)
7600 {
7601         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7602 }
7603
7604 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7605                                       struct btrfs_root *root,
7606                                       u64 parent, u64 root_objectid,
7607                                       u64 flags, u64 owner, u64 offset,
7608                                       struct btrfs_key *ins, int ref_mod)
7609 {
7610         int ret;
7611         struct btrfs_fs_info *fs_info = root->fs_info;
7612         struct btrfs_extent_item *extent_item;
7613         struct btrfs_extent_inline_ref *iref;
7614         struct btrfs_path *path;
7615         struct extent_buffer *leaf;
7616         int type;
7617         u32 size;
7618
7619         if (parent > 0)
7620                 type = BTRFS_SHARED_DATA_REF_KEY;
7621         else
7622                 type = BTRFS_EXTENT_DATA_REF_KEY;
7623
7624         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7625
7626         path = btrfs_alloc_path();
7627         if (!path)
7628                 return -ENOMEM;
7629
7630         path->leave_spinning = 1;
7631         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7632                                       ins, size);
7633         if (ret) {
7634                 btrfs_free_path(path);
7635                 return ret;
7636         }
7637
7638         leaf = path->nodes[0];
7639         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7640                                      struct btrfs_extent_item);
7641         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7642         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7643         btrfs_set_extent_flags(leaf, extent_item,
7644                                flags | BTRFS_EXTENT_FLAG_DATA);
7645
7646         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7647         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7648         if (parent > 0) {
7649                 struct btrfs_shared_data_ref *ref;
7650                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7651                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7652                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7653         } else {
7654                 struct btrfs_extent_data_ref *ref;
7655                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7656                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7657                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7658                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7659                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7660         }
7661
7662         btrfs_mark_buffer_dirty(path->nodes[0]);
7663         btrfs_free_path(path);
7664
7665         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7666         if (ret) { /* -ENOENT, logic error */
7667                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7668                         ins->objectid, ins->offset);
7669                 BUG();
7670         }
7671         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7672         return ret;
7673 }
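
/*
 * Rough sketch of the extent-tree leaf item written above (editorial):
 *
 *	key: (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes)
 *	[ btrfs_extent_item: refs | generation | FLAG_DATA ]
 *	[ inline ref: type byte + shared_data_ref or extent_data_ref ]
 *
 * parent > 0 selects BTRFS_SHARED_DATA_REF_KEY (counted against the
 * parent tree block); otherwise BTRFS_EXTENT_DATA_REF_KEY carries the
 * root/owner/offset triple plus a count.
 */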
7674
7675 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7676                                      struct btrfs_root *root,
7677                                      u64 parent, u64 root_objectid,
7678                                      u64 flags, struct btrfs_disk_key *key,
7679                                      int level, struct btrfs_key *ins,
7680                                      int no_quota)
7681 {
7682         int ret;
7683         struct btrfs_fs_info *fs_info = root->fs_info;
7684         struct btrfs_extent_item *extent_item;
7685         struct btrfs_tree_block_info *block_info;
7686         struct btrfs_extent_inline_ref *iref;
7687         struct btrfs_path *path;
7688         struct extent_buffer *leaf;
7689         u32 size = sizeof(*extent_item) + sizeof(*iref);
7690         u64 num_bytes = ins->offset;
7691         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7692                                                  SKINNY_METADATA);
7693
7694         if (!skinny_metadata)
7695                 size += sizeof(*block_info);
7696
7697         path = btrfs_alloc_path();
7698         if (!path) {
7699                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7700                                                    root->nodesize);
7701                 return -ENOMEM;
7702         }
7703
7704         path->leave_spinning = 1;
7705         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7706                                       ins, size);
7707         if (ret) {
7708                 btrfs_free_path(path);
7709                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7710                                                    root->nodesize);
7711                 return ret;
7712         }
7713
7714         leaf = path->nodes[0];
7715         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7716                                      struct btrfs_extent_item);
7717         btrfs_set_extent_refs(leaf, extent_item, 1);
7718         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7719         btrfs_set_extent_flags(leaf, extent_item,
7720                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7721
7722         if (skinny_metadata) {
7723                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7724                 num_bytes = root->nodesize;
7725         } else {
7726                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7727                 btrfs_set_tree_block_key(leaf, block_info, key);
7728                 btrfs_set_tree_block_level(leaf, block_info, level);
7729                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7730         }
7731
7732         if (parent > 0) {
7733                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7734                 btrfs_set_extent_inline_ref_type(leaf, iref,
7735                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7736                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7737         } else {
7738                 btrfs_set_extent_inline_ref_type(leaf, iref,
7739                                                  BTRFS_TREE_BLOCK_REF_KEY);
7740                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7741         }
7742
7743         btrfs_mark_buffer_dirty(leaf);
7744         btrfs_free_path(path);
7745
7746         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7747                                  1);
7748         if (ret) { /* -ENOENT, logic error */
7749                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7750                         ins->objectid, ins->offset);
7751                 BUG();
7752         }
7753
7754         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7755         return ret;
7756 }
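
/*
 * Editorial sketch of the two on-disk forms written above, with the key
 * as set up by the caller:
 *
 *	skinny (SKINNY_METADATA):
 *		key: (bytenr, BTRFS_METADATA_ITEM_KEY, level)
 *		[ btrfs_extent_item ][ inline ref ]
 *	classic:
 *		key: (bytenr, BTRFS_EXTENT_ITEM_KEY, nodesize)
 *		[ btrfs_extent_item ][ btrfs_tree_block_info ][ inline ref ]
 *
 * The skinny form drops tree_block_info entirely and encodes the level in
 * the key's offset field instead.
 */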
7757
7758 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7759                                      struct btrfs_root *root,
7760                                      u64 root_objectid, u64 owner,
7761                                      u64 offset, struct btrfs_key *ins)
7762 {
7763         int ret;
7764
7765         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7766
7767         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7768                                          ins->offset, 0,
7769                                          root_objectid, owner, offset,
7770                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7771         return ret;
7772 }
7773
7774 /*
7775  * this is used by the tree logging recovery code.  It records that
7776  * an extent has been allocated and makes sure to clear the free
7777  * space cache bits as well
7778  */
7779 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7780                                    struct btrfs_root *root,
7781                                    u64 root_objectid, u64 owner, u64 offset,
7782                                    struct btrfs_key *ins)
7783 {
7784         int ret;
7785         struct btrfs_block_group_cache *block_group;
7786
7787         /*
7788          * Mixed block groups will have excluded extents set up before the
7789          * log is processed, so only do the exclude dance if this fs isn't mixed.
7790          */
7791         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7792                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7793                 if (ret)
7794                         return ret;
7795         }
7796
7797         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7798         if (!block_group)
7799                 return -EINVAL;
7800
7801         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7802                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7803         BUG_ON(ret); /* logic error */
7804         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7805                                          0, owner, offset, ins, 1);
7806         btrfs_put_block_group(block_group);
7807         return ret;
7808 }
7809
7810 static struct extent_buffer *
7811 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7812                       u64 bytenr, int level)
7813 {
7814         struct extent_buffer *buf;
7815
7816         buf = btrfs_find_create_tree_block(root, bytenr);
7817         if (!buf)
7818                 return ERR_PTR(-ENOMEM);
7819         btrfs_set_header_generation(buf, trans->transid);
7820         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7821         btrfs_tree_lock(buf);
7822         clean_tree_block(trans, root->fs_info, buf);
7823         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7824
7825         btrfs_set_lock_blocking(buf);
7826         btrfs_set_buffer_uptodate(buf);
7827
7828         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7829                 buf->log_index = root->log_transid % 2;
7830                 /*
7831                  * we allow two log transactions at a time, use different
7832                  * EXTENT bits to differentiate dirty pages.
7833                  */
7834                 if (buf->log_index == 0)
7835                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7836                                         buf->start + buf->len - 1, GFP_NOFS);
7837                 else
7838                         set_extent_new(&root->dirty_log_pages, buf->start,
7839                                         buf->start + buf->len - 1, GFP_NOFS);
7840         } else {
7841                 buf->log_index = -1;
7842                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7843                          buf->start + buf->len - 1, GFP_NOFS);
7844         }
7845         trans->blocks_used++;
7846         /* this returns a buffer locked for blocking */
7847         return buf;
7848 }
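
/*
 * Editorial note: dirty tracking above depends on who owns the new block.
 * Log tree blocks go into root->dirty_log_pages, alternating between the
 * EXTENT_DIRTY and EXTENT_NEW bits so the two in-flight log transactions
 * can be written back independently; all other blocks land in the
 * transaction's dirty_pages set.
 */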
7849
7850 static struct btrfs_block_rsv *
7851 use_block_rsv(struct btrfs_trans_handle *trans,
7852               struct btrfs_root *root, u32 blocksize)
7853 {
7854         struct btrfs_block_rsv *block_rsv;
7855         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7856         int ret;
7857         bool global_updated = false;
7858
7859         block_rsv = get_block_rsv(trans, root);
7860
7861         if (unlikely(block_rsv->size == 0))
7862                 goto try_reserve;
7863 again:
7864         ret = block_rsv_use_bytes(block_rsv, blocksize);
7865         if (!ret)
7866                 return block_rsv;
7867
7868         if (block_rsv->failfast)
7869                 return ERR_PTR(ret);
7870
7871         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7872                 global_updated = true;
7873                 update_global_block_rsv(root->fs_info);
7874                 goto again;
7875         }
7876
7877         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7878                 static DEFINE_RATELIMIT_STATE(_rs,
7879                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7880                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7881                 if (__ratelimit(&_rs))
7882                         WARN(1, KERN_DEBUG
7883                                 "BTRFS: block rsv returned %d\n", ret);
7884         }
7885 try_reserve:
7886         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7887                                      BTRFS_RESERVE_NO_FLUSH);
7888         if (!ret)
7889                 return block_rsv;
7890         /*
7891          * If we couldn't reserve metadata bytes, try to use some from
7892          * the global reserve if this block rsv shares the global
7893          * reserve's space_info.
7894          */
7895         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7896             block_rsv->space_info == global_rsv->space_info) {
7897                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7898                 if (!ret)
7899                         return global_rsv;
7900         }
7901         return ERR_PTR(ret);
7902 }
7903
7904 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7905                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7906 {
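        /*
         * Hand the bytes back to the rsv, then release anything above
         * the rsv's target size back to its space_info.
         */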
7907         block_rsv_add_bytes(block_rsv, blocksize, 0);
7908         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7909 }
7910
7911 /*
7912  * finds a free extent and does all the dirty work required for allocation;
7913  * returns the tree buffer or an ERR_PTR on error.
7914  */
7915 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7916                                         struct btrfs_root *root,
7917                                         u64 parent, u64 root_objectid,
7918                                         struct btrfs_disk_key *key, int level,
7919                                         u64 hint, u64 empty_size)
7920 {
7921         struct btrfs_key ins;
7922         struct btrfs_block_rsv *block_rsv;
7923         struct extent_buffer *buf;
7924         struct btrfs_delayed_extent_op *extent_op;
7925         u64 flags = 0;
7926         int ret;
7927         u32 blocksize = root->nodesize;
7928         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7929                                                  SKINNY_METADATA);
7930
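        /* test roots get sequential bytenrs and skip the real allocator */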
7931         if (btrfs_test_is_dummy_root(root)) {
7932                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7933                                             level);
7934                 if (!IS_ERR(buf))
7935                         root->alloc_bytenr += blocksize;
7936                 return buf;
7937         }
7938
7939         block_rsv = use_block_rsv(trans, root, blocksize);
7940         if (IS_ERR(block_rsv))
7941                 return ERR_CAST(block_rsv);
7942
7943         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7944                                    empty_size, hint, &ins, 0, 0);
7945         if (ret)
7946                 goto out_unuse;
7947
7948         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7949         if (IS_ERR(buf)) {
7950                 ret = PTR_ERR(buf);
7951                 goto out_free_reserved;
7952         }
7953
7954         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7955                 if (parent == 0)
7956                         parent = ins.objectid;
7957                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7958         } else
7959                 BUG_ON(parent > 0);
7960
7961         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7962                 extent_op = btrfs_alloc_delayed_extent_op();
7963                 if (!extent_op) {
7964                         ret = -ENOMEM;
7965                         goto out_free_buf;
7966                 }
7967                 if (key)
7968                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7969                 else
7970                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7971                 extent_op->flags_to_set = flags;
7972                 if (skinny_metadata)
7973                         extent_op->update_key = 0;
7974                 else
7975                         extent_op->update_key = 1;
7976                 extent_op->update_flags = 1;
7977                 extent_op->is_data = 0;
7978                 extent_op->level = level;
7979
7980                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7981                                                  ins.objectid, ins.offset,
7982                                                  parent, root_objectid, level,
7983                                                  BTRFS_ADD_DELAYED_EXTENT,
7984                                                  extent_op, 0);
7985                 if (ret)
7986                         goto out_free_delayed;
7987         }
7988         return buf;
7989
7990 out_free_delayed:
7991         btrfs_free_delayed_extent_op(extent_op);
7992 out_free_buf:
7993         free_extent_buffer(buf);
7994 out_free_reserved:
7995         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7996 out_unuse:
7997         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7998         return ERR_PTR(ret);
7999 }
8000
8001 struct walk_control {
8002         u64 refs[BTRFS_MAX_LEVEL];
8003         u64 flags[BTRFS_MAX_LEVEL];
8004         struct btrfs_key update_progress;
8005         int stage;
8006         int level;
8007         int shared_level;
8008         int update_ref;
8009         int keep_locks;
8010         int reada_slot;
8011         int reada_count;
8012         int for_reloc;
8013 };
8014
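/*
 * Stages of the walk: DROP_REFERENCE drops our reference on each block
 * as we descend, skipping subtrees that are still shared with other
 * trees; UPDATE_BACKREF converts implicit backrefs in a shared subtree
 * to full backrefs before DROP_REFERENCE resumes.
 */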
8015 #define DROP_REFERENCE  1
8016 #define UPDATE_BACKREF  2
8017
8018 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8019                                      struct btrfs_root *root,
8020                                      struct walk_control *wc,
8021                                      struct btrfs_path *path)
8022 {
8023         u64 bytenr;
8024         u64 generation;
8025         u64 refs;
8026         u64 flags;
8027         u32 nritems;
8028         u32 blocksize;
8029         struct btrfs_key key;
8030         struct extent_buffer *eb;
8031         int ret;
8032         int slot;
8033         int nread = 0;
8034
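        /*
         * Tune the readahead window: shrink it (never below 2) while we
         * are still inside the range prefetched last time, grow it
         * (capped at a node's worth of pointers) once that is consumed.
         */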
8035         if (path->slots[wc->level] < wc->reada_slot) {
8036                 wc->reada_count = wc->reada_count * 2 / 3;
8037                 wc->reada_count = max(wc->reada_count, 2);
8038         } else {
8039                 wc->reada_count = wc->reada_count * 3 / 2;
8040                 wc->reada_count = min_t(int, wc->reada_count,
8041                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8042         }
8043
8044         eb = path->nodes[wc->level];
8045         nritems = btrfs_header_nritems(eb);
8046         blocksize = root->nodesize;
8047
8048         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8049                 if (nread >= wc->reada_count)
8050                         break;
8051
8052                 cond_resched();
8053                 bytenr = btrfs_node_blockptr(eb, slot);
8054                 generation = btrfs_node_ptr_generation(eb, slot);
8055
8056                 if (slot == path->slots[wc->level])
8057                         goto reada;
8058
8059                 if (wc->stage == UPDATE_BACKREF &&
8060                     generation <= root->root_key.offset)
8061                         continue;
8062
8063                 /* We don't lock the tree block, it's OK to be racy here */
8064                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8065                                                wc->level - 1, 1, &refs,
8066                                                &flags);
8067                 /* We don't care about errors in readahead. */
8068                 if (ret < 0)
8069                         continue;
8070                 BUG_ON(refs == 0);
8071
8072                 if (wc->stage == DROP_REFERENCE) {
8073                         if (refs == 1)
8074                                 goto reada;
8075
8076                         if (wc->level == 1 &&
8077                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8078                                 continue;
8079                         if (!wc->update_ref ||
8080                             generation <= root->root_key.offset)
8081                                 continue;
8082                         btrfs_node_key_to_cpu(eb, &key, slot);
8083                         ret = btrfs_comp_cpu_keys(&key,
8084                                                   &wc->update_progress);
8085                         if (ret < 0)
8086                                 continue;
8087                 } else {
8088                         if (wc->level == 1 &&
8089                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8090                                 continue;
8091                 }
8092 reada:
8093                 readahead_tree_block(root, bytenr);
8094                 nread++;
8095         }
8096         wc->reada_slot = slot;
8097 }
8098
8099 /*
8100  * TODO: Modify the related functions to add the relevant nodes/leaves to
8101  * dirty_extent_root for later qgroup accounting.
8102  *
8103  * Currently, this function does nothing.
8104  */
8105 static int account_leaf_items(struct btrfs_trans_handle *trans,
8106                               struct btrfs_root *root,
8107                               struct extent_buffer *eb)
8108 {
8109         int nr = btrfs_header_nritems(eb);
8110         int i, extent_type;
8111         struct btrfs_key key;
8112         struct btrfs_file_extent_item *fi;
8113         u64 bytenr, num_bytes;
8114
8115         for (i = 0; i < nr; i++) {
8116                 btrfs_item_key_to_cpu(eb, &key, i);
8117
8118                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8119                         continue;
8120
8121                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8122                 /* filter out non qgroup-accountable extents  */
8123                 extent_type = btrfs_file_extent_type(eb, fi);
8124
8125                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8126                         continue;
8127
8128                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8129                 if (!bytenr)
8130                         continue;
8131
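                /*
                 * the disk bytenr/length of the real extent end up here
                 * for the future qgroup hook; nothing is recorded yet
                 * (see the TODO above).
                 */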
8132                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8133         }
8134         return 0;
8135 }
8136
8137 /*
8138  * Walk up the tree from the bottom, freeing leaves and any interior
8139  * nodes which have had all slots visited. If a node (leaf or
8140  * interior) is freed, the node above it will have its slot
8141  * incremented. The root node will never be freed.
8142  *
8143  * At the end of this function, we should have a path which has all
8144  * slots incremented to the next position for a search. If we need to
8145  * read a new node it will be NULL and the node above it will have the
8146  * correct slot selected for a later read.
8147  *
8148  * If we increment the root node's slot counter past the number of
8149  * elements, 1 is returned to signal completion of the search.
8150  */
8151 static int adjust_slots_upwards(struct btrfs_root *root,
8152                                 struct btrfs_path *path, int root_level)
8153 {
8154         int level = 0;
8155         int nr, slot;
8156         struct extent_buffer *eb;
8157
8158         if (root_level == 0)
8159                 return 1;
8160
8161         while (level <= root_level) {
8162                 eb = path->nodes[level];
8163                 nr = btrfs_header_nritems(eb);
8164                 path->slots[level]++;
8165                 slot = path->slots[level];
8166                 if (slot >= nr || level == 0) {
8167                         /*
8168                          * Don't free the root - we will detect this
8169                          * condition after our loop and return a
8170                          * positive value for caller to stop walking the tree.
8171                          */
8172                         if (level != root_level) {
8173                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8174                                 path->locks[level] = 0;
8175
8176                                 free_extent_buffer(eb);
8177                                 path->nodes[level] = NULL;
8178                                 path->slots[level] = 0;
8179                         }
8180                 } else {
8181                         /*
8182                          * We have a valid slot to walk back down
8183                          * from. Stop here so caller can process these
8184                          * new nodes.
8185                          */
8186                         break;
8187                 }
8188
8189                 level++;
8190         }
8191
8192         eb = path->nodes[root_level];
8193         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8194                 return 1;
8195
8196         return 0;
8197 }
8198
8199 /*
8200  * root_eb is the subtree root and is locked before this function is called.
8201  * TODO: Modify this function to mark all (including completely shared) nodes
8202  * as dirty in dirty_extent_root so they get accounted in qgroup.
8203  */
8204 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8205                                   struct btrfs_root *root,
8206                                   struct extent_buffer *root_eb,
8207                                   u64 root_gen,
8208                                   int root_level)
8209 {
8210         int ret = 0;
8211         int level;
8212         struct extent_buffer *eb = root_eb;
8213         struct btrfs_path *path = NULL;
8214
8215         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8216         BUG_ON(root_eb == NULL);
8217
8218         if (!root->fs_info->quota_enabled)
8219                 return 0;
8220
8221         if (!extent_buffer_uptodate(root_eb)) {
8222                 ret = btrfs_read_buffer(root_eb, root_gen);
8223                 if (ret)
8224                         goto out;
8225         }
8226
8227         if (root_level == 0) {
8228                 ret = account_leaf_items(trans, root, root_eb);
8229                 goto out;
8230         }
8231
8232         path = btrfs_alloc_path();
8233         if (!path)
8234                 return -ENOMEM;
8235
8236         /*
8237          * Walk down the tree.  Missing extent blocks are filled in as
8238          * we go. Metadata is accounted every time we read a new
8239          * extent block.
8240          *
8241          * When we reach a leaf, we account for file extent items in it,
8242          * walk back up the tree (adjusting slot pointers as we go)
8243          * and restart the search process.
8244          */
8245         extent_buffer_get(root_eb); /* For path */
8246         path->nodes[root_level] = root_eb;
8247         path->slots[root_level] = 0;
8248         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8249 walk_down:
8250         level = root_level;
8251         while (level >= 0) {
8252                 if (path->nodes[level] == NULL) {
8253                         int parent_slot;
8254                         u64 child_gen;
8255                         u64 child_bytenr;
8256
8257                         /* We need to get child blockptr/gen from
8258                          * parent before we can read it. */
8259                         eb = path->nodes[level + 1];
8260                         parent_slot = path->slots[level + 1];
8261                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8262                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8263
8264                         eb = read_tree_block(root, child_bytenr, child_gen);
8265                         if (IS_ERR(eb)) {
8266                                 ret = PTR_ERR(eb);
8267                                 goto out;
8268                         } else if (!extent_buffer_uptodate(eb)) {
8269                                 free_extent_buffer(eb);
8270                                 ret = -EIO;
8271                                 goto out;
8272                         }
8273
8274                         path->nodes[level] = eb;
8275                         path->slots[level] = 0;
8276
8277                         btrfs_tree_read_lock(eb);
8278                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8279                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8280                 }
8281
8282                 if (level == 0) {
8283                         ret = account_leaf_items(trans, root, path->nodes[level]);
8284                         if (ret)
8285                                 goto out;
8286
8287                         /* Nonzero return here means we completed our search */
8288                         ret = adjust_slots_upwards(root, path, root_level);
8289                         if (ret)
8290                                 break;
8291
8292                         /* Restart search with new slots */
8293                         goto walk_down;
8294                 }
8295
8296                 level--;
8297         }
8298
8299         ret = 0;
8300 out:
8301         btrfs_free_path(path);
8302
8303         return ret;
8304 }
8305
8306 /*
8307  * helper to process tree block while walking down the tree.
8308  *
8309  * when wc->stage == UPDATE_BACKREF, this function updates
8310  * back refs for pointers in the block.
8311  *
8312  * NOTE: return value 1 means we should stop walking down.
8313  */
8314 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8315                                    struct btrfs_root *root,
8316                                    struct btrfs_path *path,
8317                                    struct walk_control *wc, int lookup_info)
8318 {
8319         int level = wc->level;
8320         struct extent_buffer *eb = path->nodes[level];
8321         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8322         int ret;
8323
8324         if (wc->stage == UPDATE_BACKREF &&
8325             btrfs_header_owner(eb) != root->root_key.objectid)
8326                 return 1;
8327
8328         /*
8329          * when the reference count of a tree block is 1, it won't increase
8330          * again. once the full backref flag is set, we never clear it.
8331          */
8332         if (lookup_info &&
8333             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8334              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8335                 BUG_ON(!path->locks[level]);
8336                 ret = btrfs_lookup_extent_info(trans, root,
8337                                                eb->start, level, 1,
8338                                                &wc->refs[level],
8339                                                &wc->flags[level]);
8340                 BUG_ON(ret == -ENOMEM);
8341                 if (ret)
8342                         return ret;
8343                 BUG_ON(wc->refs[level] == 0);
8344         }
8345
8346         if (wc->stage == DROP_REFERENCE) {
8347                 if (wc->refs[level] > 1)
8348                         return 1;
8349
8350                 if (path->locks[level] && !wc->keep_locks) {
8351                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8352                         path->locks[level] = 0;
8353                 }
8354                 return 0;
8355         }
8356
8357         /* wc->stage == UPDATE_BACKREF */
8358         if (!(wc->flags[level] & flag)) {
8359                 BUG_ON(!path->locks[level]);
8360                 ret = btrfs_inc_ref(trans, root, eb, 1);
8361                 BUG_ON(ret); /* -ENOMEM */
8362                 ret = btrfs_dec_ref(trans, root, eb, 0);
8363                 BUG_ON(ret); /* -ENOMEM */
8364                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8365                                                   eb->len, flag,
8366                                                   btrfs_header_level(eb), 0);
8367                 BUG_ON(ret); /* -ENOMEM */
8368                 wc->flags[level] |= flag;
8369         }
8370
8371         /*
8372          * the block is shared by multiple trees, so it's not good to
8373          * keep the tree lock
8374          */
8375         if (path->locks[level] && level > 0) {
8376                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8377                 path->locks[level] = 0;
8378         }
8379         return 0;
8380 }
8381
8382 /*
8383  * helper to process tree block pointer.
8384  *
8385  * when wc->stage == DROP_REFERENCE, this function checks
8386  * the reference count of the block pointed to. if the block
8387  * is shared and we need to update back refs for the subtree
8388  * rooted at the block, this function changes wc->stage to
8389  * UPDATE_BACKREF. if the block is shared and there is no
8390  * need to update back refs, this function drops the reference
8391  * to the block.
8392  *
8393  * NOTE: return value 1 means we should stop walking down.
8394  */
8395 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8396                                  struct btrfs_root *root,
8397                                  struct btrfs_path *path,
8398                                  struct walk_control *wc, int *lookup_info)
8399 {
8400         u64 bytenr;
8401         u64 generation;
8402         u64 parent;
8403         u32 blocksize;
8404         struct btrfs_key key;
8405         struct extent_buffer *next;
8406         int level = wc->level;
8407         int reada = 0;
8408         int ret = 0;
8409         bool need_account = false;
8410
8411         generation = btrfs_node_ptr_generation(path->nodes[level],
8412                                                path->slots[level]);
8413         /*
8414          * if the lower level block was created before the snapshot
8415          * was created, we know there is no need to update back refs
8416          * for the subtree
8417          */
8418         if (wc->stage == UPDATE_BACKREF &&
8419             generation <= root->root_key.offset) {
8420                 *lookup_info = 1;
8421                 return 1;
8422         }
8423
8424         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8425         blocksize = root->nodesize;
8426
8427         next = btrfs_find_tree_block(root->fs_info, bytenr);
8428         if (!next) {
8429                 next = btrfs_find_create_tree_block(root, bytenr);
8430                 if (!next)
8431                         return -ENOMEM;
8432                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8433                                                level - 1);
8434                 reada = 1;
8435         }
8436         btrfs_tree_lock(next);
8437         btrfs_set_lock_blocking(next);
8438
8439         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8440                                        &wc->refs[level - 1],
8441                                        &wc->flags[level - 1]);
8442         if (ret < 0) {
8443                 btrfs_tree_unlock(next);
8444                 return ret;
8445         }
8446
8447         if (unlikely(wc->refs[level - 1] == 0)) {
8448                 btrfs_err(root->fs_info, "Missing references.");
8449                 BUG();
8450         }
8451         *lookup_info = 0;
8452
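        /*
         * A child with more than one ref is shared: either skip it and
         * just drop our ref on it at `skip', or, if its backrefs still
         * need converting, switch to UPDATE_BACKREF and descend.
         */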
8453         if (wc->stage == DROP_REFERENCE) {
8454                 if (wc->refs[level - 1] > 1) {
8455                         need_account = true;
8456                         if (level == 1 &&
8457                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8458                                 goto skip;
8459
8460                         if (!wc->update_ref ||
8461                             generation <= root->root_key.offset)
8462                                 goto skip;
8463
8464                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8465                                               path->slots[level]);
8466                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8467                         if (ret < 0)
8468                                 goto skip;
8469
8470                         wc->stage = UPDATE_BACKREF;
8471                         wc->shared_level = level - 1;
8472                 }
8473         } else {
8474                 if (level == 1 &&
8475                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8476                         goto skip;
8477         }
8478
8479         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8480                 btrfs_tree_unlock(next);
8481                 free_extent_buffer(next);
8482                 next = NULL;
8483                 *lookup_info = 1;
8484         }
8485
8486         if (!next) {
8487                 if (reada && level == 1)
8488                         reada_walk_down(trans, root, wc, path);
8489                 next = read_tree_block(root, bytenr, generation);
8490                 if (IS_ERR(next)) {
8491                         return PTR_ERR(next);
8492                 } else if (!extent_buffer_uptodate(next)) {
8493                         free_extent_buffer(next);
8494                         return -EIO;
8495                 }
8496                 btrfs_tree_lock(next);
8497                 btrfs_set_lock_blocking(next);
8498         }
8499
8500         level--;
8501         BUG_ON(level != btrfs_header_level(next));
8502         path->nodes[level] = next;
8503         path->slots[level] = 0;
8504         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8505         wc->level = level;
8506         if (wc->level == 1)
8507                 wc->reada_slot = 0;
8508         return 0;
8509 skip:
8510         wc->refs[level - 1] = 0;
8511         wc->flags[level - 1] = 0;
8512         if (wc->stage == DROP_REFERENCE) {
8513                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8514                         parent = path->nodes[level]->start;
8515                 } else {
8516                         BUG_ON(root->root_key.objectid !=
8517                                btrfs_header_owner(path->nodes[level]));
8518                         parent = 0;
8519                 }
8520
8521                 if (need_account) {
8522                         ret = account_shared_subtree(trans, root, next,
8523                                                      generation, level - 1);
8524                         if (ret) {
8525                                 btrfs_err_rl(root->fs_info,
8526                                         "Error "
8527                                         "%d accounting shared subtree. Quota "
8528                                         "is out of sync, rescan required.",
8529                                         ret);
8530                         }
8531                 }
8532                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8533                                 root->root_key.objectid, level - 1, 0, 0);
8534                 BUG_ON(ret); /* -ENOMEM */
8535         }
8536         btrfs_tree_unlock(next);
8537         free_extent_buffer(next);
8538         *lookup_info = 1;
8539         return 1;
8540 }
8541
8542 /*
8543  * helper to process tree block while walking up the tree.
8544  *
8545  * when wc->stage == DROP_REFERENCE, this function drops
8546  * reference count on the block.
8547  *
8548  * when wc->stage == UPDATE_BACKREF, this function changes
8549  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8550  * to UPDATE_BACKREF previously while processing the block.
8551  *
8552  * NOTE: return value 1 means we should stop walking up.
8553  */
8554 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8555                                  struct btrfs_root *root,
8556                                  struct btrfs_path *path,
8557                                  struct walk_control *wc)
8558 {
8559         int ret;
8560         int level = wc->level;
8561         struct extent_buffer *eb = path->nodes[level];
8562         u64 parent = 0;
8563
8564         if (wc->stage == UPDATE_BACKREF) {
8565                 BUG_ON(wc->shared_level < level);
8566                 if (level < wc->shared_level)
8567                         goto out;
8568
8569                 ret = find_next_key(path, level + 1, &wc->update_progress);
8570                 if (ret > 0)
8571                         wc->update_ref = 0;
8572
8573                 wc->stage = DROP_REFERENCE;
8574                 wc->shared_level = -1;
8575                 path->slots[level] = 0;
8576
8577                 /*
8578                  * check reference count again if the block isn't locked.
8579                  * we should start walking down the tree again if reference
8580                  * count is one.
8581                  */
8582                 if (!path->locks[level]) {
8583                         BUG_ON(level == 0);
8584                         btrfs_tree_lock(eb);
8585                         btrfs_set_lock_blocking(eb);
8586                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8587
8588                         ret = btrfs_lookup_extent_info(trans, root,
8589                                                        eb->start, level, 1,
8590                                                        &wc->refs[level],
8591                                                        &wc->flags[level]);
8592                         if (ret < 0) {
8593                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8594                                 path->locks[level] = 0;
8595                                 return ret;
8596                         }
8597                         BUG_ON(wc->refs[level] == 0);
8598                         if (wc->refs[level] == 1) {
8599                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8600                                 path->locks[level] = 0;
8601                                 return 1;
8602                         }
8603                 }
8604         }
8605
8606         /* wc->stage == DROP_REFERENCE */
8607         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8608
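        /*
         * Last reference: for a leaf, drop the refs it holds on its
         * file data extents (FULL_BACKREF decides how those refs are
         * named), then free the block itself via btrfs_free_tree_block.
         */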
8609         if (wc->refs[level] == 1) {
8610                 if (level == 0) {
8611                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8612                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8613                         else
8614                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8615                         BUG_ON(ret); /* -ENOMEM */
8616                         ret = account_leaf_items(trans, root, eb);
8617                         if (ret) {
8618                                 btrfs_err_rl(root->fs_info,
8619                                         "error "
8620                                         "%d accounting leaf items. Quota "
8621                                         "is out of sync, rescan required.",
8622                                         ret);
8623                         }
8624                 }
8625                 /* make block locked assertion in clean_tree_block happy */
8626                 if (!path->locks[level] &&
8627                     btrfs_header_generation(eb) == trans->transid) {
8628                         btrfs_tree_lock(eb);
8629                         btrfs_set_lock_blocking(eb);
8630                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8631                 }
8632                 clean_tree_block(trans, root->fs_info, eb);
8633         }
8634
8635         if (eb == root->node) {
8636                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8637                         parent = eb->start;
8638                 else
8639                         BUG_ON(root->root_key.objectid !=
8640                                btrfs_header_owner(eb));
8641         } else {
8642                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8643                         parent = path->nodes[level + 1]->start;
8644                 else
8645                         BUG_ON(root->root_key.objectid !=
8646                                btrfs_header_owner(path->nodes[level + 1]));
8647         }
8648
8649         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8650 out:
8651         wc->refs[level] = 0;
8652         wc->flags[level] = 0;
8653         return 0;
8654 }
8655
8656 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8657                                    struct btrfs_root *root,
8658                                    struct btrfs_path *path,
8659                                    struct walk_control *wc)
8660 {
8661         int level = wc->level;
8662         int lookup_info = 1;
8663         int ret;
8664
8665         while (level >= 0) {
8666                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8667                 if (ret > 0)
8668                         break;
8669
8670                 if (level == 0)
8671                         break;
8672
8673                 if (path->slots[level] >=
8674                     btrfs_header_nritems(path->nodes[level]))
8675                         break;
8676
8677                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8678                 if (ret > 0) {
8679                         path->slots[level]++;
8680                         continue;
8681                 } else if (ret < 0)
8682                         return ret;
8683                 level = wc->level;
8684         }
8685         return 0;
8686 }
8687
8688 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8689                                  struct btrfs_root *root,
8690                                  struct btrfs_path *path,
8691                                  struct walk_control *wc, int max_level)
8692 {
8693         int level = wc->level;
8694         int ret;
8695
8696         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8697         while (level < max_level && path->nodes[level]) {
8698                 wc->level = level;
8699                 if (path->slots[level] + 1 <
8700                     btrfs_header_nritems(path->nodes[level])) {
8701                         path->slots[level]++;
8702                         return 0;
8703                 } else {
8704                         ret = walk_up_proc(trans, root, path, wc);
8705                         if (ret > 0)
8706                                 return 0;
8707
8708                         if (path->locks[level]) {
8709                                 btrfs_tree_unlock_rw(path->nodes[level],
8710                                                      path->locks[level]);
8711                                 path->locks[level] = 0;
8712                         }
8713                         free_extent_buffer(path->nodes[level]);
8714                         path->nodes[level] = NULL;
8715                         level++;
8716                 }
8717         }
8718         return 1;
8719 }
8720
8721 /*
8722  * drop a subvolume tree.
8723  *
8724  * this function traverses the tree freeing any blocks that are only
8725  * referenced by the tree.
8726  *
8727  * when a shared tree block is found, this function decreases its
8728  * reference count by one. if update_ref is true, this function
8729  * also makes sure backrefs for the shared block and all lower level
8730  * blocks are properly updated.
8731  *
8732  * If called with for_reloc == 0, may exit early with -EAGAIN
8733  */
8734 int btrfs_drop_snapshot(struct btrfs_root *root,
8735                          struct btrfs_block_rsv *block_rsv, int update_ref,
8736                          int for_reloc)
8737 {
8738         struct btrfs_path *path;
8739         struct btrfs_trans_handle *trans;
8740         struct btrfs_root *tree_root = root->fs_info->tree_root;
8741         struct btrfs_root_item *root_item = &root->root_item;
8742         struct walk_control *wc;
8743         struct btrfs_key key;
8744         int err = 0;
8745         int ret;
8746         int level;
8747         bool root_dropped = false;
8748
8749         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8750
8751         path = btrfs_alloc_path();
8752         if (!path) {
8753                 err = -ENOMEM;
8754                 goto out;
8755         }
8756
8757         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8758         if (!wc) {
8759                 btrfs_free_path(path);
8760                 err = -ENOMEM;
8761                 goto out;
8762         }
8763
8764         trans = btrfs_start_transaction(tree_root, 0);
8765         if (IS_ERR(trans)) {
8766                 err = PTR_ERR(trans);
8767                 goto out_free;
8768         }
8769
8770         if (block_rsv)
8771                 trans->block_rsv = block_rsv;
8772
8773         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8774                 level = btrfs_header_level(root->node);
8775                 path->nodes[level] = btrfs_lock_root_node(root);
8776                 btrfs_set_lock_blocking(path->nodes[level]);
8777                 path->slots[level] = 0;
8778                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8779                 memset(&wc->update_progress, 0,
8780                        sizeof(wc->update_progress));
8781         } else {
8782                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8783                 memcpy(&wc->update_progress, &key,
8784                        sizeof(wc->update_progress));
8785
8786                 level = root_item->drop_level;
8787                 BUG_ON(level == 0);
8788                 path->lowest_level = level;
8789                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8790                 path->lowest_level = 0;
8791                 if (ret < 0) {
8792                         err = ret;
8793                         goto out_end_trans;
8794                 }
8795                 WARN_ON(ret > 0);
8796
8797                 /*
8798                  * unlock our path, this is safe because only this
8799                  * function is allowed to delete this snapshot
8800                  */
8801                 btrfs_unlock_up_safe(path, 0);
8802
8803                 level = btrfs_header_level(root->node);
8804                 while (1) {
8805                         btrfs_tree_lock(path->nodes[level]);
8806                         btrfs_set_lock_blocking(path->nodes[level]);
8807                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8808
8809                         ret = btrfs_lookup_extent_info(trans, root,
8810                                                 path->nodes[level]->start,
8811                                                 level, 1, &wc->refs[level],
8812                                                 &wc->flags[level]);
8813                         if (ret < 0) {
8814                                 err = ret;
8815                                 goto out_end_trans;
8816                         }
8817                         BUG_ON(wc->refs[level] == 0);
8818
8819                         if (level == root_item->drop_level)
8820                                 break;
8821
8822                         btrfs_tree_unlock(path->nodes[level]);
8823                         path->locks[level] = 0;
8824                         WARN_ON(wc->refs[level] != 1);
8825                         level--;
8826                 }
8827         }
8828
8829         wc->level = level;
8830         wc->shared_level = -1;
8831         wc->stage = DROP_REFERENCE;
8832         wc->update_ref = update_ref;
8833         wc->keep_locks = 0;
8834         wc->for_reloc = for_reloc;
8835         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8836
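        /*
         * Alternate walking down (dropping references) and back up,
         * recording drop_progress in the root item so an interrupted
         * drop can resume where it left off.
         */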
8837         while (1) {
8838
8839                 ret = walk_down_tree(trans, root, path, wc);
8840                 if (ret < 0) {
8841                         err = ret;
8842                         break;
8843                 }
8844
8845                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8846                 if (ret < 0) {
8847                         err = ret;
8848                         break;
8849                 }
8850
8851                 if (ret > 0) {
8852                         BUG_ON(wc->stage != DROP_REFERENCE);
8853                         break;
8854                 }
8855
8856                 if (wc->stage == DROP_REFERENCE) {
8857                         level = wc->level;
8858                         btrfs_node_key(path->nodes[level],
8859                                        &root_item->drop_progress,
8860                                        path->slots[level]);
8861                         root_item->drop_level = level;
8862                 }
8863
8864                 BUG_ON(wc->level == 0);
8865                 if (btrfs_should_end_transaction(trans, tree_root) ||
8866                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8867                         ret = btrfs_update_root(trans, tree_root,
8868                                                 &root->root_key,
8869                                                 root_item);
8870                         if (ret) {
8871                                 btrfs_abort_transaction(trans, tree_root, ret);
8872                                 err = ret;
8873                                 goto out_end_trans;
8874                         }
8875
8876                         btrfs_end_transaction_throttle(trans, tree_root);
8877                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8878                                 pr_debug("BTRFS: drop snapshot early exit\n");
8879                                 err = -EAGAIN;
8880                                 goto out_free;
8881                         }
8882
8883                         trans = btrfs_start_transaction(tree_root, 0);
8884                         if (IS_ERR(trans)) {
8885                                 err = PTR_ERR(trans);
8886                                 goto out_free;
8887                         }
8888                         if (block_rsv)
8889                                 trans->block_rsv = block_rsv;
8890                 }
8891         }
8892         btrfs_release_path(path);
8893         if (err)
8894                 goto out_end_trans;
8895
8896         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8897         if (ret) {
8898                 btrfs_abort_transaction(trans, tree_root, ret);
8899                 goto out_end_trans;
8900         }
8901
8902         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8903                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8904                                       NULL, NULL);
8905                 if (ret < 0) {
8906                         btrfs_abort_transaction(trans, tree_root, ret);
8907                         err = ret;
8908                         goto out_end_trans;
8909                 } else if (ret > 0) {
8910                         /* if we fail to delete the orphan item this time
8911                          * around, it'll get picked up the next time.
8912                          *
8913                          * The most common failure here is just -ENOENT.
8914                          */
8915                         btrfs_del_orphan_item(trans, tree_root,
8916                                               root->root_key.objectid);
8917                 }
8918         }
8919
8920         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8921                 btrfs_add_dropped_root(trans, root);
8922         } else {
8923                 free_extent_buffer(root->node);
8924                 free_extent_buffer(root->commit_root);
8925                 btrfs_put_fs_root(root);
8926         }
8927         root_dropped = true;
8928 out_end_trans:
8929         btrfs_end_transaction_throttle(trans, tree_root);
8930 out_free:
8931         kfree(wc);
8932         btrfs_free_path(path);
8933 out:
8934         /*
8935          * So if we need to stop dropping the snapshot for whatever reason we
8936          * need to make sure to add it back to the dead root list so that we
8937          * keep trying to do the work later.  This also cleans up roots if we
8938          * don't have it in the radix (like when we recover after a power fail
8939          * or unmount) so we don't leak memory.
8940          */
8941         if (!for_reloc && root_dropped == false)
8942                 btrfs_add_dead_root(root);
8943         if (err && err != -EAGAIN)
8944                 btrfs_std_error(root->fs_info, err, NULL);
8945         return err;
8946 }
8947
8948 /*
8949  * drop subtree rooted at tree block 'node'.
8950  *
8951  * NOTE: this function will unlock and release tree block 'node'
8952  * only used by relocation code
8953  */
8954 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8955                         struct btrfs_root *root,
8956                         struct extent_buffer *node,
8957                         struct extent_buffer *parent)
8958 {
8959         struct btrfs_path *path;
8960         struct walk_control *wc;
8961         int level;
8962         int parent_level;
8963         int ret = 0;
8964         int wret;
8965
8966         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8967
8968         path = btrfs_alloc_path();
8969         if (!path)
8970                 return -ENOMEM;
8971
8972         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8973         if (!wc) {
8974                 btrfs_free_path(path);
8975                 return -ENOMEM;
8976         }
8977
8978         btrfs_assert_tree_locked(parent);
8979         parent_level = btrfs_header_level(parent);
8980         extent_buffer_get(parent);
8981         path->nodes[parent_level] = parent;
8982         path->slots[parent_level] = btrfs_header_nritems(parent);
8983
8984         btrfs_assert_tree_locked(node);
8985         level = btrfs_header_level(node);
8986         path->nodes[level] = node;
8987         path->slots[level] = 0;
8988         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8989
8990         wc->refs[parent_level] = 1;
8991         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8992         wc->level = level;
8993         wc->shared_level = -1;
8994         wc->stage = DROP_REFERENCE;
8995         wc->update_ref = 0;
8996         wc->keep_locks = 1;
8997         wc->for_reloc = 1;
8998         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8999
9000         while (1) {
9001                 wret = walk_down_tree(trans, root, path, wc);
9002                 if (wret < 0) {
9003                         ret = wret;
9004                         break;
9005                 }
9006
9007                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9008                 if (wret < 0)
9009                         ret = wret;
9010                 if (wret != 0)
9011                         break;
9012         }
9013
9014         kfree(wc);
9015         btrfs_free_path(path);
9016         return ret;
9017 }
9018
9019 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9020 {
9021         u64 num_devices;
9022         u64 stripped;
9023
9024         /*
9025          * if restripe for this chunk_type is on, pick the target profile and
9026          * return, otherwise do the usual balance
9027          */
9028         stripped = get_restripe_target(root->fs_info, flags);
9029         if (stripped)
9030                 return extended_to_chunk(stripped);
9031
9032         num_devices = root->fs_info->fs_devices->rw_devices;
9033
9034         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9035                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9036                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9037
9038         if (num_devices == 1) {
9039                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9040                 stripped = flags & ~stripped;
9041
9042                 /* turn raid0 into single device chunks */
9043                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9044                         return stripped;
9045
9046                 /* turn mirroring into duplication */
9047                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9048                              BTRFS_BLOCK_GROUP_RAID10))
9049                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9050         } else {
9051                 /* they already had raid on here, just return */
9052                 if (flags & stripped)
9053                         return flags;
9054
9055                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9056                 stripped = flags & ~stripped;
9057
9058                 /* switch duplicated blocks with raid1 */
9059                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9060                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9061
9062                 /* this is drive concat, leave it alone */
9063         }
9064
9065         return flags;
9066 }
9067
9068 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9069 {
9070         struct btrfs_space_info *sinfo = cache->space_info;
9071         u64 num_bytes;
9072         u64 min_allocable_bytes;
9073         int ret = -ENOSPC;
9074
9075         /*
9076          * We need some metadata space and system metadata space for
9077          * allocating chunks in some corner cases, so keep a minimum
9078          * allocable amount unless we're forced to set it readonly.
9079          */
9080         if ((sinfo->flags &
9081              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9082             !force)
9083                 min_allocable_bytes = 1 * 1024 * 1024;
9084         else
9085                 min_allocable_bytes = 0;
9086
9087         spin_lock(&sinfo->lock);
9088         spin_lock(&cache->lock);
9089
9090         if (cache->ro) {
9091                 cache->ro++;
9092                 ret = 0;
9093                 goto out;
9094         }
9095
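        /*
         * num_bytes is the free space in this block group; it can only
         * go readonly if the rest of the space_info can absorb that
         * much on top of what is already used, reserved and pinned.
         */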
9096         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9097                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9098
9099         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9100             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9101             min_allocable_bytes <= sinfo->total_bytes) {
9102                 sinfo->bytes_readonly += num_bytes;
9103                 cache->ro++;
9104                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9105                 ret = 0;
9106         }
9107 out:
9108         spin_unlock(&cache->lock);
9109         spin_unlock(&sinfo->lock);
9110         return ret;
9111 }
9112
9113 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9114                              struct btrfs_block_group_cache *cache)
9115
9116 {
9117         struct btrfs_trans_handle *trans;
9118         u64 alloc_flags;
9119         int ret;
9120
9121 again:
9122         trans = btrfs_join_transaction(root);
9123         if (IS_ERR(trans))
9124                 return PTR_ERR(trans);
9125
9126         /*
9127          * we're not allowed to set block groups readonly after the dirty
9128          * block groups cache has started writing.  If it already started,
9129          * back off and let this transaction commit
9130          */
9131         mutex_lock(&root->fs_info->ro_block_group_mutex);
9132         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9133                 u64 transid = trans->transid;
9134
9135                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9136                 btrfs_end_transaction(trans, root);
9137
9138                 ret = btrfs_wait_for_commit(root, transid);
9139                 if (ret)
9140                         return ret;
9141                 goto again;
9142         }
9143
9144         /*
9145          * if we are changing raid levels, try to allocate a corresponding
9146          * block group with the new raid level.
9147          */
9148         alloc_flags = update_block_group_flags(root, cache->flags);
9149         if (alloc_flags != cache->flags) {
9150                 ret = do_chunk_alloc(trans, root, alloc_flags,
9151                                      CHUNK_ALLOC_FORCE);
9152                 /*
9153                  * ENOSPC is allowed here, we may have enough space
9154                  * already allocated at the new raid level to
9155                  * carry on
9156                  */
9157                 if (ret == -ENOSPC)
9158                         ret = 0;
9159                 if (ret < 0)
9160                         goto out;
9161         }
9162
9163         ret = inc_block_group_ro(cache, 0);
9164         if (!ret)
9165                 goto out;
9166         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9167         ret = do_chunk_alloc(trans, root, alloc_flags,
9168                              CHUNK_ALLOC_FORCE);
9169         if (ret < 0)
9170                 goto out;
9171         ret = inc_block_group_ro(cache, 0);
9172 out:
9173         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9174                 alloc_flags = update_block_group_flags(root, cache->flags);
9175                 lock_chunks(root->fs_info->chunk_root);
9176                 check_system_chunk(trans, root, alloc_flags);
9177                 unlock_chunks(root->fs_info->chunk_root);
9178         }
9179         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9180
9181         btrfs_end_transaction(trans, root);
9182         return ret;
9183 }
9184
9185 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9186                             struct btrfs_root *root, u64 type)
9187 {
9188         u64 alloc_flags = get_alloc_profile(root, type);
9189         return do_chunk_alloc(trans, root, alloc_flags,
9190                               CHUNK_ALLOC_FORCE);
9191 }
9192
9193 /*
9194  * helper to account the unused space of all the readonly block groups in the
9195  * space_info. takes mirrors into account.
9196  */
9197 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9198 {
9199         struct btrfs_block_group_cache *block_group;
9200         u64 free_bytes = 0;
9201         int factor;
9202
9203         /* It's df, we don't care if it's racy */
9204         if (list_empty(&sinfo->ro_bgs))
9205                 return 0;
9206
9207         spin_lock(&sinfo->lock);
9208         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9209                 spin_lock(&block_group->lock);
9210
9211                 if (!block_group->ro) {
9212                         spin_unlock(&block_group->lock);
9213                         continue;
9214                 }
9215
9216                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9217                                           BTRFS_BLOCK_GROUP_RAID10 |
9218                                           BTRFS_BLOCK_GROUP_DUP))
9219                         factor = 2;
9220                 else
9221                         factor = 1;
9222
9223                 free_bytes += (block_group->key.offset -
9224                                btrfs_block_group_used(&block_group->item)) *
9225                                factor;
9226
9227                 spin_unlock(&block_group->lock);
9228         }
9229         spin_unlock(&sinfo->lock);
9230
9231         return free_bytes;
9232 }
9233
9234 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9235                               struct btrfs_block_group_cache *cache)
9236 {
9237         struct btrfs_space_info *sinfo = cache->space_info;
9238         u64 num_bytes;
9239
9240         BUG_ON(!cache->ro);
9241
9242         spin_lock(&sinfo->lock);
9243         spin_lock(&cache->lock);
9244         if (!--cache->ro) {
9245                 num_bytes = cache->key.offset - cache->reserved -
9246                             cache->pinned - cache->bytes_super -
9247                             btrfs_block_group_used(&cache->item);
9248                 sinfo->bytes_readonly -= num_bytes;
9249                 list_del_init(&cache->ro_list);
9250         }
9251         spin_unlock(&cache->lock);
9252         spin_unlock(&sinfo->lock);
9253 }
9254
9255 /*
9256  * checks to see if it's even possible to relocate this block group.
9257  *
9258  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9259  * ok to go ahead and try.
9260  */
9261 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9262 {
9263         struct btrfs_block_group_cache *block_group;
9264         struct btrfs_space_info *space_info;
9265         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9266         struct btrfs_device *device;
9267         struct btrfs_trans_handle *trans;
9268         u64 min_free;
9269         u64 dev_min = 1;
9270         u64 dev_nr = 0;
9271         u64 target;
9272         int index;
9273         int full = 0;
9274         int ret = 0;
9275
9276         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9277
9278         /* odd, couldn't find the block group, leave it alone */
9279         if (!block_group)
9280                 return -1;
9281
9282         min_free = btrfs_block_group_used(&block_group->item);
9283
9284         /* no bytes used, we're good */
9285         if (!min_free)
9286                 goto out;
9287
9288         space_info = block_group->space_info;
9289         spin_lock(&space_info->lock);
9290
9291         full = space_info->full;
9292
9293         /*
9294          * if this is the last block group we have in this space, we can't
9295          * relocate it unless we're able to allocate a new chunk below.
9296          *
9297          * Otherwise, we need to make sure we have room in the space to handle
9298          * all of the extents from this block group.  If we can, we're good
9299          */
9300         if ((space_info->total_bytes != block_group->key.offset) &&
9301             (space_info->bytes_used + space_info->bytes_reserved +
9302              space_info->bytes_pinned + space_info->bytes_readonly +
9303              min_free < space_info->total_bytes)) {
9304                 spin_unlock(&space_info->lock);
9305                 goto out;
9306         }
9307         spin_unlock(&space_info->lock);
9308
9309         /*
9310          * ok we don't have enough space, but maybe we have free space on our
9311          * devices to allocate new chunks for relocation, so loop through our
9312          * alloc devices and guess if we have enough space.  if this block
9313          * group is going to be restriped, run checks against the target
9314          * profile instead of the current one.
9315          */
9316         ret = -1;
9317
9318         /*
9319          * index:
9320          *      0: raid10
9321          *      1: raid1
9322          *      2: dup
9323          *      3: raid0
9324          *      4: single
9325          */
9326         target = get_restripe_target(root->fs_info, block_group->flags);
9327         if (target) {
9328                 index = __get_raid_index(extended_to_chunk(target));
9329         } else {
9330                 /*
9331                  * this is just a balance, so if we were marked as full
9332                  * we know there is no space for a new chunk
9333                  */
9334                 if (full)
9335                         goto out;
9336
9337                 index = get_block_group_index(block_group);
9338         }
9339
9340         if (index == BTRFS_RAID_RAID10) {
9341                 dev_min = 4;
9342                 /* Divide by 2 */
9343                 min_free >>= 1;
9344         } else if (index == BTRFS_RAID_RAID1) {
9345                 dev_min = 2;
9346         } else if (index == BTRFS_RAID_DUP) {
9347                 /* Multiply by 2 */
9348                 min_free <<= 1;
9349         } else if (index == BTRFS_RAID_RAID0) {
9350                 dev_min = fs_devices->rw_devices;
9351                 min_free = div64_u64(min_free, dev_min);
9352         }
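
	/*
	 * Illustrative numbers: relocating a RAID10 group with 4GiB used
	 * needs four devices that can each fit 2GiB (data is striped over
	 * mirrored pairs, so every device carries half of min_free), while
	 * a DUP group needs a single device with twice the used bytes free.
	 */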
9353
9354         /* We need to do this so that we can look at pending chunks */
9355         trans = btrfs_join_transaction(root);
9356         if (IS_ERR(trans)) {
9357                 ret = PTR_ERR(trans);
9358                 goto out;
9359         }
9360
9361         mutex_lock(&root->fs_info->chunk_mutex);
9362         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9363                 u64 dev_offset;
9364
9365                 /*
9366                  * check to make sure we can actually find a chunk with enough
9367                  * space to fit our block group in.
9368                  */
9369                 if (device->total_bytes > device->bytes_used + min_free &&
9370                     !device->is_tgtdev_for_dev_replace) {
9371                         ret = find_free_dev_extent(trans, device, min_free,
9372                                                    &dev_offset, NULL);
9373                         if (!ret)
9374                                 dev_nr++;
9375
9376                         if (dev_nr >= dev_min)
9377                                 break;
9378
9379                         ret = -1;
9380                 }
9381         }
9382         mutex_unlock(&root->fs_info->chunk_mutex);
9383         btrfs_end_transaction(trans, root);
9384 out:
9385         btrfs_put_block_group(block_group);
9386         return ret;
9387 }
9388
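/*
 * Walk the extent tree forward from *key and leave @path pointing at
 * the first BLOCK_GROUP_ITEM whose objectid is >= key->objectid.
 * Returns 0 when one is found, a positive value once the tree is
 * exhausted, or a negative errno on error.
 */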
9389 static int find_first_block_group(struct btrfs_root *root,
9390                 struct btrfs_path *path, struct btrfs_key *key)
9391 {
9392         int ret = 0;
9393         struct btrfs_key found_key;
9394         struct extent_buffer *leaf;
9395         int slot;
9396
9397         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9398         if (ret < 0)
9399                 goto out;
9400
9401         while (1) {
9402                 slot = path->slots[0];
9403                 leaf = path->nodes[0];
9404                 if (slot >= btrfs_header_nritems(leaf)) {
9405                         ret = btrfs_next_leaf(root, path);
9406                         if (ret == 0)
9407                                 continue;
9408                         if (ret < 0)
9409                                 goto out;
9410                         break;
9411                 }
9412                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9413
9414                 if (found_key.objectid >= key->objectid &&
9415                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9416                         ret = 0;
9417                         goto out;
9418                 }
9419                 path->slots[0]++;
9420         }
9421 out:
9422         return ret;
9423 }
9424
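/*
 * Release the free space cache inode reference (block_group->iref)
 * held by block groups, walking the whole block group cache in
 * objectid order.
 */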
9425 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9426 {
9427         struct btrfs_block_group_cache *block_group;
9428         u64 last = 0;
9429
9430         while (1) {
9431                 struct inode *inode;
9432
9433                 block_group = btrfs_lookup_first_block_group(info, last);
9434                 while (block_group) {
9435                         spin_lock(&block_group->lock);
9436                         if (block_group->iref)
9437                                 break;
9438                         spin_unlock(&block_group->lock);
9439                         block_group = next_block_group(info->tree_root,
9440                                                        block_group);
9441                 }
9442                 if (!block_group) {
9443                         if (last == 0)
9444                                 break;
9445                         last = 0;
9446                         continue;
9447                 }
9448
9449                 inode = block_group->inode;
9450                 block_group->iref = 0;
9451                 block_group->inode = NULL;
9452                 spin_unlock(&block_group->lock);
9453                 iput(inode);
9454                 last = block_group->key.objectid + block_group->key.offset;
9455                 btrfs_put_block_group(block_group);
9456         }
9457 }
9458
9459 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9460 {
9461         struct btrfs_block_group_cache *block_group;
9462         struct btrfs_space_info *space_info;
9463         struct btrfs_caching_control *caching_ctl;
9464         struct rb_node *n;
9465
9466         down_write(&info->commit_root_sem);
9467         while (!list_empty(&info->caching_block_groups)) {
9468                 caching_ctl = list_entry(info->caching_block_groups.next,
9469                                          struct btrfs_caching_control, list);
9470                 list_del(&caching_ctl->list);
9471                 put_caching_control(caching_ctl);
9472         }
9473         up_write(&info->commit_root_sem);
9474
9475         spin_lock(&info->unused_bgs_lock);
9476         while (!list_empty(&info->unused_bgs)) {
9477                 block_group = list_first_entry(&info->unused_bgs,
9478                                                struct btrfs_block_group_cache,
9479                                                bg_list);
9480                 list_del_init(&block_group->bg_list);
9481                 btrfs_put_block_group(block_group);
9482         }
9483         spin_unlock(&info->unused_bgs_lock);
9484
9485         spin_lock(&info->block_group_cache_lock);
9486         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9487                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9488                                        cache_node);
9489                 rb_erase(&block_group->cache_node,
9490                          &info->block_group_cache_tree);
9491                 RB_CLEAR_NODE(&block_group->cache_node);
9492                 spin_unlock(&info->block_group_cache_lock);
9493
9494                 down_write(&block_group->space_info->groups_sem);
9495                 list_del(&block_group->list);
9496                 up_write(&block_group->space_info->groups_sem);
9497
9498                 if (block_group->cached == BTRFS_CACHE_STARTED)
9499                         wait_block_group_cache_done(block_group);
9500
9501                 /*
9502                  * We haven't cached this block group, which means we could
9503                  * possibly have excluded extents on this block group.
9504                  */
9505                 if (block_group->cached == BTRFS_CACHE_NO ||
9506                     block_group->cached == BTRFS_CACHE_ERROR)
9507                         free_excluded_extents(info->extent_root, block_group);
9508
9509                 btrfs_remove_free_space_cache(block_group);
9510                 btrfs_put_block_group(block_group);
9511
9512                 spin_lock(&info->block_group_cache_lock);
9513         }
9514         spin_unlock(&info->block_group_cache_lock);
9515
9516         /* now that all the block groups are freed, go through and
9517          * free all the space_info structs.  This is only called during
9518          * the final stages of unmount, and so we know nobody is
9519          * using them.  We call synchronize_rcu() once before we start,
9520          * just to be on the safe side.
9521          */
9522         synchronize_rcu();
9523
9524         release_global_block_rsv(info);
9525
9526         while (!list_empty(&info->space_info)) {
9527                 int i;
9528
9529                 space_info = list_entry(info->space_info.next,
9530                                         struct btrfs_space_info,
9531                                         list);
9532                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9533                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9534                             space_info->bytes_reserved > 0 ||
9535                             space_info->bytes_may_use > 0)) {
9536                                 dump_space_info(space_info, 0, 0);
9537                         }
9538                 }
9539                 list_del(&space_info->list);
9540                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9541                         struct kobject *kobj;
9542                         kobj = space_info->block_group_kobjs[i];
9543                         space_info->block_group_kobjs[i] = NULL;
9544                         if (kobj) {
9545                                 kobject_del(kobj);
9546                                 kobject_put(kobj);
9547                         }
9548                 }
9549                 kobject_del(&space_info->kobj);
9550                 kobject_put(&space_info->kobj);
9551         }
9552         return 0;
9553 }
9554
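/*
 * Link @cache into its space_info's per-RAID-type list. The first
 * group of a given type also gets a sysfs kobject, nested under the
 * space_info's directory (illustrative path:
 * /sys/fs/btrfs/<fsid>/allocation/<type>/<raid name>).
 */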
9555 static void __link_block_group(struct btrfs_space_info *space_info,
9556                                struct btrfs_block_group_cache *cache)
9557 {
9558         int index = get_block_group_index(cache);
9559         bool first = false;
9560
9561         down_write(&space_info->groups_sem);
9562         if (list_empty(&space_info->block_groups[index]))
9563                 first = true;
9564         list_add_tail(&cache->list, &space_info->block_groups[index]);
9565         up_write(&space_info->groups_sem);
9566
9567         if (first) {
9568                 struct raid_kobject *rkobj;
9569                 int ret;
9570
9571                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9572                 if (!rkobj)
9573                         goto out_err;
9574                 rkobj->raid_type = index;
9575                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9576                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9577                                   "%s", get_raid_name(index));
9578                 if (ret) {
9579                         kobject_put(&rkobj->kobj);
9580                         goto out_err;
9581                 }
9582                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9583         }
9584
9585         return;
9586 out_err:
9587         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9588 }
9589
9590 static struct btrfs_block_group_cache *
9591 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9592 {
9593         struct btrfs_block_group_cache *cache;
9594
9595         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9596         if (!cache)
9597                 return NULL;
9598
9599         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9600                                         GFP_NOFS);
9601         if (!cache->free_space_ctl) {
9602                 kfree(cache);
9603                 return NULL;
9604         }
9605
9606         cache->key.objectid = start;
9607         cache->key.offset = size;
9608         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9609
9610         cache->sectorsize = root->sectorsize;
9611         cache->fs_info = root->fs_info;
9612         cache->full_stripe_len = btrfs_full_stripe_len(root,
9613                                                &root->fs_info->mapping_tree,
9614                                                start);
9615         atomic_set(&cache->count, 1);
9616         spin_lock_init(&cache->lock);
9617         init_rwsem(&cache->data_rwsem);
9618         INIT_LIST_HEAD(&cache->list);
9619         INIT_LIST_HEAD(&cache->cluster_list);
9620         INIT_LIST_HEAD(&cache->bg_list);
9621         INIT_LIST_HEAD(&cache->ro_list);
9622         INIT_LIST_HEAD(&cache->dirty_list);
9623         INIT_LIST_HEAD(&cache->io_list);
9624         btrfs_init_free_space_ctl(cache);
9625         atomic_set(&cache->trimming, 0);
9626
9627         return cache;
9628 }
9629
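/*
 * Mount-time setup: walk every BLOCK_GROUP_ITEM in the extent tree,
 * build the in-memory cache entry for each group, exclude the super
 * stripes, shortcut the caching work for completely full or completely
 * empty groups, wire each group into its space_info and queue empty
 * groups on the unused_bgs list for later cleanup.
 */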
9630 int btrfs_read_block_groups(struct btrfs_root *root)
9631 {
9632         struct btrfs_path *path;
9633         int ret;
9634         struct btrfs_block_group_cache *cache;
9635         struct btrfs_fs_info *info = root->fs_info;
9636         struct btrfs_space_info *space_info;
9637         struct btrfs_key key;
9638         struct btrfs_key found_key;
9639         struct extent_buffer *leaf;
9640         int need_clear = 0;
9641         u64 cache_gen;
9642
9643         root = info->extent_root;
9644         key.objectid = 0;
9645         key.offset = 0;
9646         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9647         path = btrfs_alloc_path();
9648         if (!path)
9649                 return -ENOMEM;
9650         path->reada = 1;
9651
9652         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9653         if (btrfs_test_opt(root, SPACE_CACHE) &&
9654             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9655                 need_clear = 1;
9656         if (btrfs_test_opt(root, CLEAR_CACHE))
9657                 need_clear = 1;
9658
9659         while (1) {
9660                 ret = find_first_block_group(root, path, &key);
9661                 if (ret > 0)
9662                         break;
9663                 if (ret != 0)
9664                         goto error;
9665
9666                 leaf = path->nodes[0];
9667                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9668
9669                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9670                                                        found_key.offset);
9671                 if (!cache) {
9672                         ret = -ENOMEM;
9673                         goto error;
9674                 }
9675
9676                 if (need_clear) {
9677                         /*
9678                          * When we mount with an old space cache, we need to
9679                          * set BTRFS_DC_CLEAR and set the dirty flag.
9680                          *
9681                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9682                          *    truncate the old free space cache inode and
9683                          *    setup a new one.
9684                          * b) Setting 'dirty flag' makes sure that we flush
9685                          *    the new space cache info onto disk.
9686                          */
9687                         if (btrfs_test_opt(root, SPACE_CACHE))
9688                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9689                 }
9690
9691                 read_extent_buffer(leaf, &cache->item,
9692                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9693                                    sizeof(cache->item));
9694                 cache->flags = btrfs_block_group_flags(&cache->item);
9695
9696                 key.objectid = found_key.objectid + found_key.offset;
9697                 btrfs_release_path(path);
9698
9699                 /*
9700                  * We need to exclude the super stripes now so that the space
9701                  * info has super bytes accounted for, otherwise we'll think
9702                  * we have more space than we actually do.
9703                  */
9704                 ret = exclude_super_stripes(root, cache);
9705                 if (ret) {
9706                         /*
9707                          * We may have excluded something, so call this just in
9708                          * case.
9709                          */
9710                         free_excluded_extents(root, cache);
9711                         btrfs_put_block_group(cache);
9712                         goto error;
9713                 }
9714
9715                 /*
9716                  * Check for two cases: either we are full, and therefore
9717                  * don't need to bother with the caching work since we won't
9718                  * find any space, or we are empty, and we can just add all
9719                  * the space in and be done with it.  This saves us a lot of
9720                  * time, particularly in the full case.
9721                  */
9722                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9723                         cache->last_byte_to_unpin = (u64)-1;
9724                         cache->cached = BTRFS_CACHE_FINISHED;
9725                         free_excluded_extents(root, cache);
9726                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9727                         cache->last_byte_to_unpin = (u64)-1;
9728                         cache->cached = BTRFS_CACHE_FINISHED;
9729                         add_new_free_space(cache, root->fs_info,
9730                                            found_key.objectid,
9731                                            found_key.objectid +
9732                                            found_key.offset);
9733                         free_excluded_extents(root, cache);
9734                 }
9735
9736                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9737                 if (ret) {
9738                         btrfs_remove_free_space_cache(cache);
9739                         btrfs_put_block_group(cache);
9740                         goto error;
9741                 }
9742
9743                 ret = update_space_info(info, cache->flags, found_key.offset,
9744                                         btrfs_block_group_used(&cache->item),
9745                                         &space_info);
9746                 if (ret) {
9747                         btrfs_remove_free_space_cache(cache);
9748                         spin_lock(&info->block_group_cache_lock);
9749                         rb_erase(&cache->cache_node,
9750                                  &info->block_group_cache_tree);
9751                         RB_CLEAR_NODE(&cache->cache_node);
9752                         spin_unlock(&info->block_group_cache_lock);
9753                         btrfs_put_block_group(cache);
9754                         goto error;
9755                 }
9756
9757                 cache->space_info = space_info;
9758                 spin_lock(&cache->space_info->lock);
9759                 cache->space_info->bytes_readonly += cache->bytes_super;
9760                 spin_unlock(&cache->space_info->lock);
9761
9762                 __link_block_group(space_info, cache);
9763
9764                 set_avail_alloc_bits(root->fs_info, cache->flags);
9765                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9766                         inc_block_group_ro(cache, 1);
9767                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9768                         spin_lock(&info->unused_bgs_lock);
9769                         /* Should always be true but just in case. */
9770                         if (list_empty(&cache->bg_list)) {
9771                                 btrfs_get_block_group(cache);
9772                                 list_add_tail(&cache->bg_list,
9773                                               &info->unused_bgs);
9774                         }
9775                         spin_unlock(&info->unused_bgs_lock);
9776                 }
9777         }
9778
9779         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9780                 if (!(get_alloc_profile(root, space_info->flags) &
9781                       (BTRFS_BLOCK_GROUP_RAID10 |
9782                        BTRFS_BLOCK_GROUP_RAID1 |
9783                        BTRFS_BLOCK_GROUP_RAID5 |
9784                        BTRFS_BLOCK_GROUP_RAID6 |
9785                        BTRFS_BLOCK_GROUP_DUP)))
9786                         continue;
9787                 /*
9788                  * avoid allocating from un-mirrored block group if there are
9789                  * mirrored block groups.
9790                  */
9791                 list_for_each_entry(cache,
9792                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9793                                 list)
9794                         inc_block_group_ro(cache, 1);
9795                 list_for_each_entry(cache,
9796                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9797                                 list)
9798                         inc_block_group_ro(cache, 1);
9799         }
9800
9801         init_global_block_rsv(info);
9802         ret = 0;
9803 error:
9804         btrfs_free_path(path);
9805         return ret;
9806 }
9807
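/*
 * Insert the block group items for the groups created earlier in this
 * transaction (trans->new_bgs) into the extent tree. The insertions
 * can themselves trigger chunk allocation, so can_flush_pending_bgs is
 * cleared for the duration to keep us from recursing back in here.
 */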
9808 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9809                                        struct btrfs_root *root)
9810 {
9811         struct btrfs_block_group_cache *block_group, *tmp;
9812         struct btrfs_root *extent_root = root->fs_info->extent_root;
9813         struct btrfs_block_group_item item;
9814         struct btrfs_key key;
9815         int ret = 0;
9816         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9817
9818         trans->can_flush_pending_bgs = false;
9819         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9820                 if (ret)
9821                         goto next;
9822
9823                 spin_lock(&block_group->lock);
9824                 memcpy(&item, &block_group->item, sizeof(item));
9825                 memcpy(&key, &block_group->key, sizeof(key));
9826                 spin_unlock(&block_group->lock);
9827
9828                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9829                                         sizeof(item));
9830                 if (ret)
9831                         btrfs_abort_transaction(trans, extent_root, ret);
9832                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9833                                                key.objectid, key.offset);
9834                 if (ret)
9835                         btrfs_abort_transaction(trans, extent_root, ret);
9836 next:
9837                 list_del_init(&block_group->bg_list);
9838         }
9839         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9840 }
9841
9842 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9843                            struct btrfs_root *root, u64 bytes_used,
9844                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9845                            u64 size)
9846 {
9847         int ret;
9848         struct btrfs_root *extent_root;
9849         struct btrfs_block_group_cache *cache;
9850
9851         extent_root = root->fs_info->extent_root;
9852
9853         btrfs_set_log_full_commit(root->fs_info, trans);
9854
9855         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9856         if (!cache)
9857                 return -ENOMEM;
9858
9859         btrfs_set_block_group_used(&cache->item, bytes_used);
9860         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9861         btrfs_set_block_group_flags(&cache->item, type);
9862
9863         cache->flags = type;
9864         cache->last_byte_to_unpin = (u64)-1;
9865         cache->cached = BTRFS_CACHE_FINISHED;
9866         ret = exclude_super_stripes(root, cache);
9867         if (ret) {
9868                 /*
9869                  * We may have excluded something, so call this just in
9870                  * case.
9871                  */
9872                 free_excluded_extents(root, cache);
9873                 btrfs_put_block_group(cache);
9874                 return ret;
9875         }
9876
9877         add_new_free_space(cache, root->fs_info, chunk_offset,
9878                            chunk_offset + size);
9879
9880         free_excluded_extents(root, cache);
9881
9882 #ifdef CONFIG_BTRFS_DEBUG
9883         if (btrfs_should_fragment_free_space(root, cache)) {
9884                 u64 new_bytes_used = size - bytes_used;
9885
9886                 bytes_used += new_bytes_used >> 1;
9887                 fragment_free_space(root, cache);
9888         }
9889 #endif
9890         /*
9891          * Call to ensure the corresponding space_info object is created and
9892          * assigned to our block group, but don't update its counters just yet.
9893          * We want our bg to be added to the rbtree with its ->space_info set.
9894          */
9895         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9896                                 &cache->space_info);
9897         if (ret) {
9898                 btrfs_remove_free_space_cache(cache);
9899                 btrfs_put_block_group(cache);
9900                 return ret;
9901         }
9902
9903         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9904         if (ret) {
9905                 btrfs_remove_free_space_cache(cache);
9906                 btrfs_put_block_group(cache);
9907                 return ret;
9908         }
9909
9910         /*
9911          * Now that our block group has its ->space_info set and is inserted in
9912          * the rbtree, update the space info's counters.
9913          */
9914         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9915                                 &cache->space_info);
9916         if (ret) {
9917                 btrfs_remove_free_space_cache(cache);
9918                 spin_lock(&root->fs_info->block_group_cache_lock);
9919                 rb_erase(&cache->cache_node,
9920                          &root->fs_info->block_group_cache_tree);
9921                 RB_CLEAR_NODE(&cache->cache_node);
9922                 spin_unlock(&root->fs_info->block_group_cache_lock);
9923                 btrfs_put_block_group(cache);
9924                 return ret;
9925         }
9926         update_global_block_rsv(root->fs_info);
9927
9928         spin_lock(&cache->space_info->lock);
9929         cache->space_info->bytes_readonly += cache->bytes_super;
9930         spin_unlock(&cache->space_info->lock);
9931
9932         __link_block_group(cache->space_info, cache);
9933
9934         list_add_tail(&cache->bg_list, &trans->new_bgs);
9935
9936         set_avail_alloc_bits(extent_root->fs_info, type);
9937
9938         return 0;
9939 }
9940
9941 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9942 {
9943         u64 extra_flags = chunk_to_extended(flags) &
9944                                 BTRFS_EXTENDED_PROFILE_MASK;
9945
9946         write_seqlock(&fs_info->profiles_lock);
9947         if (flags & BTRFS_BLOCK_GROUP_DATA)
9948                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9949         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9950                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9951         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9952                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9953         write_sequnlock(&fs_info->profiles_lock);
9954 }
9955
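/*
 * Delete a block group: drop its free space cache inode, unlink it
 * from the block group rbtree, its space_info and any allocation
 * clusters, adjust the space_info counters and finally delete the
 * BLOCK_GROUP_ITEM itself. The chunk's extent map is only removed if
 * no trim is still running against the group (see the comments around
 * "remove_em" below). The caller must have made the group read-only.
 */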
9956 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9957                              struct btrfs_root *root, u64 group_start,
9958                              struct extent_map *em)
9959 {
9960         struct btrfs_path *path;
9961         struct btrfs_block_group_cache *block_group;
9962         struct btrfs_free_cluster *cluster;
9963         struct btrfs_root *tree_root = root->fs_info->tree_root;
9964         struct btrfs_key key;
9965         struct inode *inode;
9966         struct kobject *kobj = NULL;
9967         int ret;
9968         int index;
9969         int factor;
9970         struct btrfs_caching_control *caching_ctl = NULL;
9971         bool remove_em;
9972
9973         root = root->fs_info->extent_root;
9974
9975         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9976         BUG_ON(!block_group);
9977         BUG_ON(!block_group->ro);
9978
9979         /*
9980          * Free the reserved super bytes from this block group before
9981          * removing it.
9982          */
9983         free_excluded_extents(root, block_group);
9984
9985         memcpy(&key, &block_group->key, sizeof(key));
9986         index = get_block_group_index(block_group);
9987         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9988                                   BTRFS_BLOCK_GROUP_RAID1 |
9989                                   BTRFS_BLOCK_GROUP_RAID10))
9990                 factor = 2;
9991         else
9992                 factor = 1;
9993
9994         /* make sure this block group isn't part of an allocation cluster */
9995         cluster = &root->fs_info->data_alloc_cluster;
9996         spin_lock(&cluster->refill_lock);
9997         btrfs_return_cluster_to_free_space(block_group, cluster);
9998         spin_unlock(&cluster->refill_lock);
9999
10000         /*
10001          * make sure this block group isn't part of a metadata
10002          * allocation cluster
10003          */
10004         cluster = &root->fs_info->meta_alloc_cluster;
10005         spin_lock(&cluster->refill_lock);
10006         btrfs_return_cluster_to_free_space(block_group, cluster);
10007         spin_unlock(&cluster->refill_lock);
10008
10009         path = btrfs_alloc_path();
10010         if (!path) {
10011                 ret = -ENOMEM;
10012                 goto out;
10013         }
10014
10015         /*
10016          * get the inode first so any iput calls done for the io_list
10017          * aren't the final iput (no unlinks allowed now)
10018          */
10019         inode = lookup_free_space_inode(tree_root, block_group, path);
10020
10021         mutex_lock(&trans->transaction->cache_write_mutex);
10022         /*
10023          * make sure our free space cache IO is done before removing the
10024          * free space inode
10025          */
10026         spin_lock(&trans->transaction->dirty_bgs_lock);
10027         if (!list_empty(&block_group->io_list)) {
10028                 list_del_init(&block_group->io_list);
10029
10030                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10031
10032                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10033                 btrfs_wait_cache_io(root, trans, block_group,
10034                                     &block_group->io_ctl, path,
10035                                     block_group->key.objectid);
10036                 btrfs_put_block_group(block_group);
10037                 spin_lock(&trans->transaction->dirty_bgs_lock);
10038         }
10039
10040         if (!list_empty(&block_group->dirty_list)) {
10041                 list_del_init(&block_group->dirty_list);
10042                 btrfs_put_block_group(block_group);
10043         }
10044         spin_unlock(&trans->transaction->dirty_bgs_lock);
10045         mutex_unlock(&trans->transaction->cache_write_mutex);
10046
10047         if (!IS_ERR(inode)) {
10048                 ret = btrfs_orphan_add(trans, inode);
10049                 if (ret) {
10050                         btrfs_add_delayed_iput(inode);
10051                         goto out;
10052                 }
10053                 clear_nlink(inode);
10054                 /* One for the block group's ref */
10055                 spin_lock(&block_group->lock);
10056                 if (block_group->iref) {
10057                         block_group->iref = 0;
10058                         block_group->inode = NULL;
10059                         spin_unlock(&block_group->lock);
10060                         iput(inode);
10061                 } else {
10062                         spin_unlock(&block_group->lock);
10063                 }
10064                 /* One for our lookup ref */
10065                 btrfs_add_delayed_iput(inode);
10066         }
10067
10068         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10069         key.offset = block_group->key.objectid;
10070         key.type = 0;
10071
10072         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10073         if (ret < 0)
10074                 goto out;
10075         if (ret > 0)
10076                 btrfs_release_path(path);
10077         if (ret == 0) {
10078                 ret = btrfs_del_item(trans, tree_root, path);
10079                 if (ret)
10080                         goto out;
10081                 btrfs_release_path(path);
10082         }
10083
10084         spin_lock(&root->fs_info->block_group_cache_lock);
10085         rb_erase(&block_group->cache_node,
10086                  &root->fs_info->block_group_cache_tree);
10087         RB_CLEAR_NODE(&block_group->cache_node);
10088
10089         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10090                 root->fs_info->first_logical_byte = (u64)-1;
10091         spin_unlock(&root->fs_info->block_group_cache_lock);
10092
10093         down_write(&block_group->space_info->groups_sem);
10094         /*
10095          * we must use list_del_init so people can check to see if they
10096          * are still on the list after taking the semaphore
10097          */
10098         list_del_init(&block_group->list);
10099         if (list_empty(&block_group->space_info->block_groups[index])) {
10100                 kobj = block_group->space_info->block_group_kobjs[index];
10101                 block_group->space_info->block_group_kobjs[index] = NULL;
10102                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10103         }
10104         up_write(&block_group->space_info->groups_sem);
10105         if (kobj) {
10106                 kobject_del(kobj);
10107                 kobject_put(kobj);
10108         }
10109
10110         if (block_group->has_caching_ctl)
10111                 caching_ctl = get_caching_control(block_group);
10112         if (block_group->cached == BTRFS_CACHE_STARTED)
10113                 wait_block_group_cache_done(block_group);
10114         if (block_group->has_caching_ctl) {
10115                 down_write(&root->fs_info->commit_root_sem);
10116                 if (!caching_ctl) {
10117                         struct btrfs_caching_control *ctl;
10118
10119                         list_for_each_entry(ctl,
10120                                     &root->fs_info->caching_block_groups, list)
10121                                 if (ctl->block_group == block_group) {
10122                                         caching_ctl = ctl;
10123                                         atomic_inc(&caching_ctl->count);
10124                                         break;
10125                                 }
10126                 }
10127                 if (caching_ctl)
10128                         list_del_init(&caching_ctl->list);
10129                 up_write(&root->fs_info->commit_root_sem);
10130                 if (caching_ctl) {
10131                         /* Once for the caching bgs list and once for us. */
10132                         put_caching_control(caching_ctl);
10133                         put_caching_control(caching_ctl);
10134                 }
10135         }
10136
10137         spin_lock(&trans->transaction->dirty_bgs_lock);
10138         WARN_ON(!list_empty(&block_group->dirty_list));
10139         WARN_ON(!list_empty(&block_group->io_list));
10144         spin_unlock(&trans->transaction->dirty_bgs_lock);
10145         btrfs_remove_free_space_cache(block_group);
10146
10147         spin_lock(&block_group->space_info->lock);
10148         list_del_init(&block_group->ro_list);
10149
10150         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10151                 WARN_ON(block_group->space_info->total_bytes
10152                         < block_group->key.offset);
10153                 WARN_ON(block_group->space_info->bytes_readonly
10154                         < block_group->key.offset);
10155                 WARN_ON(block_group->space_info->disk_total
10156                         < block_group->key.offset * factor);
10157         }
10158         block_group->space_info->total_bytes -= block_group->key.offset;
10159         block_group->space_info->bytes_readonly -= block_group->key.offset;
10160         block_group->space_info->disk_total -= block_group->key.offset * factor;
10161
10162         spin_unlock(&block_group->space_info->lock);
10163
10164         memcpy(&key, &block_group->key, sizeof(key));
10165
10166         lock_chunks(root);
10167         if (!list_empty(&em->list)) {
10168                 /* We're in the transaction->pending_chunks list. */
10169                 free_extent_map(em);
10170         }
10171         spin_lock(&block_group->lock);
10172         block_group->removed = 1;
10173         /*
10174          * At this point trimming can't start on this block group, because we
10175          * removed the block group from the tree fs_info->block_group_cache_tree
10176          * so no one can find it anymore, and even if someone already got this
10177          * block group before we removed it from the rbtree, they have already
10178          * incremented block_group->trimming - if they didn't, they won't find
10179          * any free space entries because we already removed them all when we
10180          * called btrfs_remove_free_space_cache().
10181          *
10182          * And we must not remove the extent map from the fs_info->mapping_tree
10183          * to prevent the same logical address range and physical device space
10184          * ranges from being reused for a new block group. This is because our
10185          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10186          * completely transactionless, so while it is trimming a range the
10187          * currently running transaction might finish and a new one start,
10188          * allowing for new block groups to be created that can reuse the same
10189          * physical device locations unless we take this special care.
10190          *
10191          * There may also be an implicit trim operation if the file system
10192          * is mounted with -odiscard. The same protections must remain
10193          * in place until the extents have been discarded completely when
10194          * the transaction commit has completed.
10195          */
10196         remove_em = (atomic_read(&block_group->trimming) == 0);
10197         /*
10198          * Make sure a trimmer task always sees the em in the pinned_chunks list
10199          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10200          * before checking block_group->removed).
10201          */
10202         if (!remove_em) {
10203                 /*
10204                  * Our em might be in trans->transaction->pending_chunks which
10205                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10206                  * and so is the fs_info->pinned_chunks list.
10207                  *
10208                  * So at this point we must be holding the chunk_mutex to avoid
10209                  * any races with chunk allocation (more specifically at
10210                  * volumes.c:contains_pending_extent()), to ensure it always
10211                  * sees the em, either in the pending_chunks list or in the
10212                  * pinned_chunks list.
10213                  */
10214                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10215         }
10216         spin_unlock(&block_group->lock);
10217
10218         if (remove_em) {
10219                 struct extent_map_tree *em_tree;
10220
10221                 em_tree = &root->fs_info->mapping_tree.map_tree;
10222                 write_lock(&em_tree->lock);
10223                 /*
10224                  * The em might be in the pending_chunks list, so make sure the
10225                  * chunk mutex is locked, since remove_extent_mapping() will
10226                  * delete us from that list.
10227                  */
10228                 remove_extent_mapping(em_tree, em);
10229                 write_unlock(&em_tree->lock);
10230                 /* once for the tree */
10231                 free_extent_map(em);
10232         }
10233
10234         unlock_chunks(root);
10235
10236         btrfs_put_block_group(block_group);
10237         btrfs_put_block_group(block_group);
10238
10239         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10240         if (ret > 0)
10241                 ret = -EIO;
10242         if (ret < 0)
10243                 goto out;
10244
10245         ret = btrfs_del_item(trans, root, path);
10246 out:
10247         btrfs_free_path(path);
10248         return ret;
10249 }
10250
10251 /*
10252  * Process the unused_bgs list and remove any that don't have any allocated
10253  * space inside of them.
10254  */
10255 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10256 {
10257         struct btrfs_block_group_cache *block_group;
10258         struct btrfs_space_info *space_info;
10259         struct btrfs_root *root = fs_info->extent_root;
10260         struct btrfs_trans_handle *trans;
10261         int ret = 0;
10262
10263         if (!fs_info->open)
10264                 return;
10265
10266         spin_lock(&fs_info->unused_bgs_lock);
10267         while (!list_empty(&fs_info->unused_bgs)) {
10268                 u64 start, end;
10269                 int trimming;
10270
10271                 block_group = list_first_entry(&fs_info->unused_bgs,
10272                                                struct btrfs_block_group_cache,
10273                                                bg_list);
10274                 space_info = block_group->space_info;
10275                 list_del_init(&block_group->bg_list);
10276                 if (ret || btrfs_mixed_space_info(space_info)) {
10277                         btrfs_put_block_group(block_group);
10278                         continue;
10279                 }
10280                 spin_unlock(&fs_info->unused_bgs_lock);
10281
10282                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10283
10284                 /* Don't want to race with allocators so take the groups_sem */
10285                 down_write(&space_info->groups_sem);
10286                 spin_lock(&block_group->lock);
10287                 if (block_group->reserved ||
10288                     btrfs_block_group_used(&block_group->item) ||
10289                     block_group->ro) {
10290                         /*
10291                          * We want to bail if we made new allocations or have
10292                          * outstanding allocations in this block group.  We do
10293                          * the ro check in case balance is currently acting on
10294                          * this block group.
10295                          */
10296                         spin_unlock(&block_group->lock);
10297                         up_write(&space_info->groups_sem);
10298                         goto next;
10299                 }
10300                 spin_unlock(&block_group->lock);
10301
10302                 /* We don't want to force the issue, only flip if it's ok. */
10303                 ret = inc_block_group_ro(block_group, 0);
10304                 up_write(&space_info->groups_sem);
10305                 if (ret < 0) {
10306                         ret = 0;
10307                         goto next;
10308                 }
10309
10310                 /*
10311                  * Want to do this before we do anything else so we can recover
10312                  * properly if we fail to join the transaction.
10313                  */
10314                 /* 1 for btrfs_orphan_reserve_metadata() */
10315                 trans = btrfs_start_transaction(root, 1);
10316                 if (IS_ERR(trans)) {
10317                         btrfs_dec_block_group_ro(root, block_group);
10318                         ret = PTR_ERR(trans);
10319                         goto next;
10320                 }
10321
10322                 /*
10323                  * We could have pending pinned extents for this block group,
10324                  * just delete them, we don't care about them anymore.
10325                  */
10326                 start = block_group->key.objectid;
10327                 end = start + block_group->key.offset - 1;
10328                 /*
10329                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10330                  * btrfs_finish_extent_commit(). If we are at transaction N,
10331                  * another task might be running finish_extent_commit() for the
10332                  * previous transaction N - 1, and have seen a range belonging
10333                  * to the block group in freed_extents[] before we were able to
10334                  * clear the whole block group range from freed_extents[]. This
10335                  * means that task can lookup for the block group after we
10336                  * unpinned it from freed_extents[] and removed it, leading to
10337                  * a BUG_ON() at btrfs_unpin_extent_range().
10338                  */
10339                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10340                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10341                                   EXTENT_DIRTY, GFP_NOFS);
10342                 if (ret) {
10343                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10344                         btrfs_dec_block_group_ro(root, block_group);
10345                         goto end_trans;
10346                 }
10347                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10348                                   EXTENT_DIRTY, GFP_NOFS);
10349                 if (ret) {
10350                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10351                         btrfs_dec_block_group_ro(root, block_group);
10352                         goto end_trans;
10353                 }
10354                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10355
10356                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10357                 spin_lock(&space_info->lock);
10358                 spin_lock(&block_group->lock);
10359
10360                 space_info->bytes_pinned -= block_group->pinned;
10361                 space_info->bytes_readonly += block_group->pinned;
10362                 percpu_counter_add(&space_info->total_bytes_pinned,
10363                                    -block_group->pinned);
10364                 block_group->pinned = 0;
10365
10366                 spin_unlock(&block_group->lock);
10367                 spin_unlock(&space_info->lock);
10368
10369                 /* DISCARD can flip during remount */
10370                 trimming = btrfs_test_opt(root, DISCARD);
10371
10372                 /* Implicit trim during transaction commit. */
10373                 if (trimming)
10374                         btrfs_get_block_group_trimming(block_group);
10375
10376                 /*
10377                  * btrfs_remove_chunk() will abort the transaction if things go
10378                  * horribly wrong.
10379                  */
10380                 ret = btrfs_remove_chunk(trans, root,
10381                                          block_group->key.objectid);
10382
10383                 if (ret) {
10384                         if (trimming)
10385                                 btrfs_put_block_group_trimming(block_group);
10386                         goto end_trans;
10387                 }
10388
10389                 /*
10390                  * If we're not mounted with -odiscard, we can just forget
10391                  * about this block group. Otherwise we'll need to wait
10392                  * until transaction commit to do the actual discard.
10393                  */
10394                 if (trimming) {
10395                         WARN_ON(!list_empty(&block_group->bg_list));
10396                         spin_lock(&trans->transaction->deleted_bgs_lock);
10397                         list_move(&block_group->bg_list,
10398                                   &trans->transaction->deleted_bgs);
10399                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10400                         btrfs_get_block_group(block_group);
10401                 }
10402 end_trans:
10403                 btrfs_end_transaction(trans, root);
10404 next:
10405                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10406                 btrfs_put_block_group(block_group);
10407                 spin_lock(&fs_info->unused_bgs_lock);
10408         }
10409         spin_unlock(&fs_info->unused_bgs_lock);
10410 }
10411
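/*
 * Pre-create the basic space_info structures at mount time: SYSTEM,
 * plus either a combined METADATA|DATA entry (mixed block groups) or
 * separate METADATA and DATA entries, so later lookups always succeed.
 */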
10412 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10413 {
10414         struct btrfs_space_info *space_info;
10415         struct btrfs_super_block *disk_super;
10416         u64 features;
10417         u64 flags;
10418         int mixed = 0;
10419         int ret;
10420
10421         disk_super = fs_info->super_copy;
10422         if (!btrfs_super_root(disk_super))
10423                 return 1;
10424
10425         features = btrfs_super_incompat_flags(disk_super);
10426         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10427                 mixed = 1;
10428
10429         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10430         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10431         if (ret)
10432                 goto out;
10433
10434         if (mixed) {
10435                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10436                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10437         } else {
10438                 flags = BTRFS_BLOCK_GROUP_METADATA;
10439                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10440                 if (ret)
10441                         goto out;
10442
10443                 flags = BTRFS_BLOCK_GROUP_DATA;
10444                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10445         }
10446 out:
10447         return ret;
10448 }
10449
10450 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10451 {
10452         return unpin_extent_range(root, start, end, false);
10453 }
10454
10455 /*
10456  * It used to be that old block groups would be left around forever.
10457  * Iterating over them would be enough to trim unused space.  Since we
10458  * now automatically remove them, we also need to iterate over unallocated
10459  * space.
10460  *
10461  * We don't want a transaction for this since the discard may take a
10462  * substantial amount of time.  We don't require that a transaction be
10463  * running, but we do need to take a running transaction into account
10464  * to ensure that we're not discarding chunks that were released in
10465  * the current transaction.
10466  *
10467  * Holding the chunks lock will prevent other threads from allocating
10468  * or releasing chunks, but it won't prevent a running transaction
10469  * from committing and releasing the memory that the pending chunks
10470  * list head uses.  For that, we need to take a reference to the
10471  * transaction.
10472  */
10473 static int btrfs_trim_free_extents(struct btrfs_device *device,
10474                                    u64 minlen, u64 *trimmed)
10475 {
10476         u64 start = 0, len = 0;
10477         int ret;
10478
10479         *trimmed = 0;
10480
10481         /* Not writeable = nothing to do. */
10482         if (!device->writeable)
10483                 return 0;
10484
10485         /* No free space = nothing to do. */
10486         if (device->total_bytes <= device->bytes_used)
10487                 return 0;
10488
10489         ret = 0;
10490
10491         while (1) {
10492                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10493                 struct btrfs_transaction *trans;
10494                 u64 bytes;
10495
10496                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10497                 if (ret)
10498                         return ret;
10499
10500                 down_read(&fs_info->commit_root_sem);
10501
10502                 spin_lock(&fs_info->trans_lock);
10503                 trans = fs_info->running_transaction;
10504                 if (trans)
10505                         atomic_inc(&trans->use_count);
10506                 spin_unlock(&fs_info->trans_lock);
10507
10508                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10509                                                  &start, &len);
10510                 if (trans)
10511                         btrfs_put_transaction(trans);
10512
10513                 if (ret) {
10514                         up_read(&fs_info->commit_root_sem);
10515                         mutex_unlock(&fs_info->chunk_mutex);
10516                         if (ret == -ENOSPC)
10517                                 ret = 0;
10518                         break;
10519                 }
10520
10521                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10522                 up_read(&fs_info->commit_root_sem);
10523                 mutex_unlock(&fs_info->chunk_mutex);
10524
10525                 if (ret)
10526                         break;
10527
10528                 start += len;
10529                 *trimmed += bytes;
10530
10531                 if (fatal_signal_pending(current)) {
10532                         ret = -ERESTARTSYS;
10533                         break;
10534                 }
10535
10536                 cond_resched();
10537         }
10538
10539         return ret;
10540 }
10541
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * When trimming the whole FS, start from the first block group;
	 * it may begin at a non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

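	/*
	 * Walk every block group overlapping
	 * [range->start, range->start + range->len) and trim its free space.
	 */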
	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

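	/*
	 * Block groups only cover allocated chunks; also trim the free
	 * (unallocated) space on each writable device.
	 */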
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	devices = &root->fs_info->fs_devices->alloc_list;
	list_for_each_entry(device, devices, dev_alloc_list) {
		ret = btrfs_trim_free_extents(device, range->minlen,
					      &group_trimmed);
		if (ret)
			break;

		trimmed += group_trimmed;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	range->len = trimmed;
	return ret;
}
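
/*
 * Userspace reaches btrfs_trim_fs() through the FITRIM ioctl.  A minimal
 * sketch of a caller (illustrative only, not code from this file):
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * On return, range.len holds the number of bytes actually trimmed, as
 * set at the end of btrfs_trim_fs() above.
 */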

/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write(): they prevent tasks from writing data into the
 * page cache through nocow before the subvolume is snapshotted but
 * flushing it to disk only after the snapshot has been created, and they
 * block operations that would make an in-progress snapshot inconsistent
 * (writes followed by expanding truncates, for example).
 */
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshoted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshoted)) {
		btrfs_end_write_no_snapshoting(root);
		return 0;
	}
	return 1;
}
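
/*
 * Typical usage (a sketch, not code from this file): a nocow writer
 * brackets its write with these helpers and falls back to the normal
 * cow path when a snapshot is pending:
 *
 *	if (btrfs_start_write_no_snapshoting(root)) {
 *		... do the nocow write ...
 *		btrfs_end_write_no_snapshoting(root);
 *	} else {
 *		... fall back to cow ...
 *	}
 *
 * A return of 0 from btrfs_start_write_no_snapshoting() means a snapshot
 * is about to be (or is being) taken and the nocow write must not proceed.
 */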