// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   the num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     Create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, and kick the async flush thread.
 *
 *   -> handle_reserve_ticket
 *     Wait on ticket->wait for ->bytes to be reduced to 0, or for ->error to
 *     be set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it is we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted).
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however holds reservations, so letting them run allows us to
 *   reclaim space and make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation because of
 *     overcommit: we don't want to allocate a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not.  In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 *   FORCE_COMMIT_TRANS
 *     For use by the preemptive flusher.  We use this to bypass the ticketing
 *     checks in may_commit_transaction, as we have more information about the
 *     overall state of the system and may want to commit the transaction ahead
 *     of actual ENOSPC conditions.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
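
/*
 * Editor's worked example (illustrative, not part of the btrfs sources):
 * follow a 16KiB metadata reservation that ends up allocating a 16KiB
 * extent through the normal case above:
 *
 *   reserve:              bytes_may_use   += 16K
 *   extent allocation:    bytes_may_use   -= 16K
 *                         bytes_reserved  += 16K
 *   insert reference:     bytes_reserved  -= 16K
 *                         bytes_used      += 16K
 *
 * At every step the quantity total_bytes - SUM(space_info->bytes_) that the
 * reservation code checks is unchanged; only the bucket the 16K sits in
 * moves, from "promised" to "allocated but unreferenced" to "used".
 */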

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
                          bool may_use_included)
{
        ASSERT(s_info);
        return s_info->bytes_used + s_info->bytes_reserved +
                s_info->bytes_pinned + s_info->bytes_readonly +
                s_info->bytes_zone_unusable +
                (may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        list_for_each_entry(found, head, list)
                found->full = 0;
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
        struct btrfs_space_info *space_info;
        int i;
        int ret;

        space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
        if (!space_info)
                return -ENOMEM;

        ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
                                  GFP_KERNEL);
        if (ret) {
                kfree(space_info);
                return ret;
        }

        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&space_info->block_groups[i]);
        init_rwsem(&space_info->groups_sem);
        spin_lock_init(&space_info->lock);
        space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
        INIT_LIST_HEAD(&space_info->ro_bgs);
        INIT_LIST_HEAD(&space_info->tickets);
        INIT_LIST_HEAD(&space_info->priority_tickets);
        space_info->clamp = 1;

        ret = btrfs_sysfs_add_space_info_type(info, space_info);
        if (ret)
                return ret;

        list_add(&space_info->list, &info->space_info);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                info->data_sinfo = space_info;

        return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return -EINVAL;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = create_space_info(fs_info, flags);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = create_space_info(fs_info, flags);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags);
        }
out:
        return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             u64 bytes_readonly, u64 bytes_zone_unusable,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;
        int factor;

        factor = btrfs_bg_type_to_factor(flags);

        found = btrfs_find_space_info(info, flags);
        ASSERT(found);
        spin_lock(&found->lock);
        found->total_bytes += total_bytes;
        found->disk_total += total_bytes * factor;
        found->bytes_used += bytes_used;
        found->disk_used += bytes_used * factor;
        found->bytes_readonly += bytes_readonly;
        found->bytes_zone_unusable += bytes_zone_unusable;
        if (total_bytes > 0)
                found->full = 0;
        btrfs_try_granting_tickets(info, found);
        spin_unlock(&found->lock);
        *space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
                                               u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        list_for_each_entry(found, head, list) {
                if (found->flags & flags)
                        return found;
        }
        return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
                          struct btrfs_space_info *space_info,
                          enum btrfs_reserve_flush_enum flush)
{
        u64 profile;
        u64 avail;
        int factor;

        if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
                profile = btrfs_system_alloc_profile(fs_info);
        else
                profile = btrfs_metadata_alloc_profile(fs_info);

        avail = atomic64_read(&fs_info->free_chunk_space);

        /*
         * If we have dup, raid1 or raid10 then only half of the free
         * space is actually usable.  For raid56, the space info used
         * doesn't include the parity drive, so we don't have to
         * change the math.
         */
        factor = btrfs_bg_type_to_factor(profile);
        avail = div_u64(avail, factor);

        /*
         * If we aren't flushing all things, let us overcommit up to half of
         * the space.  If we can flush, don't let us overcommit too much, let
         * it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
                avail >>= 3;
        else
                avail >>= 1;
        return avail;
}
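
/*
 * Worked example for calc_available_free_space() (editor's illustration,
 * numbers assumed): with a RAID1 metadata profile (factor 2) and 8GiB of
 * unallocated device space, avail = 8GiB / 2 = 4GiB.  A FLUSH_ALL
 * reservation may then overcommit by 4GiB >> 3 = 512MiB, while a weaker
 * flush level may overcommit by 4GiB >> 1 = 2GiB.
 */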

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                         struct btrfs_space_info *space_info, u64 bytes,
                         enum btrfs_reserve_flush_enum flush)
{
        u64 avail;
        u64 used;

        /* Don't overcommit when in mixed mode */
        if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
                return 0;

        used = btrfs_space_info_used(space_info, true);
        avail = calc_available_free_space(fs_info, space_info, flush);

        if (used + bytes < space_info->total_bytes + avail)
                return 1;
        return 0;
}
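
/*
 * Editor's sketch of a hypothetical caller (not btrfs code; the name and
 * placement are invented for illustration): a reservation path tests the
 * plain free-space case first and falls back to overcommit, all under the
 * space_info lock, before accounting the bytes in ->bytes_may_use.
 */
static int __maybe_unused example_try_reserve(struct btrfs_fs_info *fs_info,
                                              struct btrfs_space_info *space_info,
                                              u64 num_bytes)
{
        int ret = -ENOSPC;

        spin_lock(&space_info->lock);
        if (btrfs_space_info_used(space_info, true) + num_bytes <=
            space_info->total_bytes ||
            btrfs_can_overcommit(fs_info, space_info, num_bytes,
                                 BTRFS_RESERVE_FLUSH_ALL)) {
                /* The reservation fits; account it as promised space. */
                btrfs_space_info_update_bytes_may_use(fs_info, space_info,
                                                      num_bytes);
                ret = 0;
        }
        spin_unlock(&space_info->lock);
        return ret;
}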

static void remove_ticket(struct btrfs_space_info *space_info,
                          struct reserve_ticket *ticket)
{
        if (!list_empty(&ticket->list)) {
                list_del_init(&ticket->list);
                ASSERT(space_info->reclaim_size >= ticket->bytes);
                space_info->reclaim_size -= ticket->bytes;
        }
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
                                struct btrfs_space_info *space_info)
{
        struct list_head *head;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

        lockdep_assert_held(&space_info->lock);

        head = &space_info->priority_tickets;
again:
        while (!list_empty(head)) {
                struct reserve_ticket *ticket;
                u64 used = btrfs_space_info_used(space_info, true);

                ticket = list_first_entry(head, struct reserve_ticket, list);

                /* Check and see if our ticket can be satisfied now. */
                if ((used + ticket->bytes <= space_info->total_bytes) ||
                    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
                                         flush)) {
                        btrfs_space_info_update_bytes_may_use(fs_info,
                                                              space_info,
                                                              ticket->bytes);
                        remove_ticket(space_info, ticket);
                        ticket->bytes = 0;
                        space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        break;
                }
        }

        if (head == &space_info->priority_tickets) {
                head = &space_info->tickets;
                flush = BTRFS_RESERVE_FLUSH_ALL;
                goto again;
        }
}
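
/*
 * Worked example for btrfs_try_granting_tickets() (editor's illustration):
 * total_bytes is 10GiB, used (including bytes_may_use) is 9.75GiB, and the
 * head of ->tickets wants 512MiB.  Nothing can be granted yet; but when a
 * commit unpins 1GiB, used drops to 8.75GiB, 8.75GiB + 512MiB <= 10GiB
 * holds, so 512MiB is moved into ->bytes_may_use, ticket->bytes becomes 0
 * and the sleeping reserver is woken with its reservation in hand.
 */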

#define DUMP_BLOCK_RSV(fs_info, rsv_name)                               \
do {                                                                    \
        struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;          \
        spin_lock(&__rsv->lock);                                        \
        btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",     \
                   __rsv->size, __rsv->reserved);                      \
        spin_unlock(&__rsv->lock);                                      \
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *info)
{
        lockdep_assert_held(&info->lock);

        btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
                   info->flags,
                   info->total_bytes - btrfs_space_info_used(info, true),
                   info->full ? "" : "not ");
        btrfs_info(fs_info,
                "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
                info->total_bytes, info->bytes_used, info->bytes_pinned,
                info->bytes_reserved, info->bytes_may_use,
                info->bytes_readonly, info->bytes_zone_unusable);

        DUMP_BLOCK_RSV(fs_info, global_block_rsv);
        DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
        DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
        DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
        DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                           struct btrfs_space_info *info, u64 bytes,
                           int dump_block_groups)
{
        struct btrfs_block_group *cache;
        int index = 0;

        spin_lock(&info->lock);
        __btrfs_dump_space_info(fs_info, info);
        spin_unlock(&info->lock);

        if (!dump_block_groups)
                return;

        down_read(&info->groups_sem);
again:
        list_for_each_entry(cache, &info->block_groups[index], list) {
                spin_lock(&cache->lock);
                btrfs_info(fs_info,
                        "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
                        cache->start, cache->length, cache->used, cache->pinned,
                        cache->reserved, cache->zone_unusable,
                        cache->ro ? "[readonly]" : "");
                spin_unlock(&cache->lock);
                btrfs_dump_free_space(cache, bytes);
        }
        if (++index < BTRFS_NR_RAID_TYPES)
                goto again;
        up_read(&info->groups_sem);
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
                                        u64 to_reclaim)
{
        u64 bytes;
        u64 nr;

        bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
        nr = div64_u64(to_reclaim, bytes);
        if (!nr)
                nr = 1;
        return nr;
}
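
/*
 * Editor's note (assuming a 16KiB nodesize): the worst-case insert size for
 * a single item is a full-height COW of two paths, on the order of 256KiB.
 * Under that assumption, asking calc_reclaim_items_nr() to translate 1MiB
 * of reclaim into items yields nr = 4.
 */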

#define EXTENT_SIZE_PER_ITEM    SZ_256K

/*
 * Shrink metadata reservations by flushing delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
                            struct btrfs_space_info *space_info,
                            u64 to_reclaim, bool wait_ordered,
                            bool for_preempt)
{
        struct btrfs_trans_handle *trans;
        u64 delalloc_bytes;
        u64 ordered_bytes;
        u64 items;
        long time_left;
        int loops;

        /* Calculate the number of pages we need to flush for this reservation. */
        if (to_reclaim == U64_MAX) {
                items = U64_MAX;
        } else {
                /*
                 * to_reclaim is set to however much metadata we need to
                 * reclaim, but reclaiming that much data doesn't really track
                 * exactly, so increase the amount to reclaim by 2x in order to
                 * make sure we're flushing enough delalloc to hopefully reclaim
                 * some metadata reservations.
                 */
                items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
                to_reclaim = items * EXTENT_SIZE_PER_ITEM;
        }

        trans = (struct btrfs_trans_handle *)current->journal_info;

        delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
        ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
        if (delalloc_bytes == 0 && ordered_bytes == 0)
                return;

        /*
         * If we are doing more ordered than delalloc we need to just wait on
         * ordered extents, otherwise we'll waste time trying to flush delalloc
         * that likely won't give us the space back we need.
         */
        if (ordered_bytes > delalloc_bytes && !for_preempt)
                wait_ordered = true;

        loops = 0;
        while ((delalloc_bytes || ordered_bytes) && loops < 3) {
                u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
                long nr_pages = min_t(u64, temp, LONG_MAX);

                btrfs_start_delalloc_roots(fs_info, nr_pages, true);

                loops++;
                if (wait_ordered && !trans) {
                        btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
                                break;
                }

                /*
                 * If we are for preemption we just want a one-shot of delalloc
                 * flushing so we can stop flushing if we decide we don't need
                 * to anymore.
                 */
                if (for_preempt)
                        break;

                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets) &&
                    list_empty(&space_info->priority_tickets)) {
                        spin_unlock(&space_info->lock);
                        break;
                }
                spin_unlock(&space_info->lock);

                delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
                ordered_bytes = percpu_counter_sum_positive(
                                                &fs_info->ordered_bytes);
        }
}
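
/*
 * Editor's worked example for shrink_delalloc() (illustrative numbers,
 * 16KiB nodesize and 4KiB pages assumed): a 1MiB metadata shortfall becomes
 * items = 4 * 2 = 8, so to_reclaim = 8 * EXTENT_SIZE_PER_ITEM = 2MiB of
 * delalloc, which caps each loop iteration at 2MiB >> PAGE_SHIFT = 512
 * pages handed to btrfs_start_delalloc_roots().
 */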

/**
 * Possibly commit the transaction if it's OK to do so.
 *
 * @fs_info:    the filesystem
 * @space_info: space_info we are checking for commit, either data or metadata
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
                                  struct btrfs_space_info *space_info)
{
        struct reserve_ticket *ticket = NULL;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
        struct btrfs_trans_handle *trans;
        u64 reclaim_bytes = 0;
        u64 bytes_needed = 0;
        u64 cur_free_bytes = 0;

        trans = (struct btrfs_trans_handle *)current->journal_info;
        if (trans)
                return -EAGAIN;

        spin_lock(&space_info->lock);
        cur_free_bytes = btrfs_space_info_used(space_info, true);
        if (cur_free_bytes < space_info->total_bytes)
                cur_free_bytes = space_info->total_bytes - cur_free_bytes;
        else
                cur_free_bytes = 0;

        if (!list_empty(&space_info->priority_tickets))
                ticket = list_first_entry(&space_info->priority_tickets,
                                          struct reserve_ticket, list);
        else if (!list_empty(&space_info->tickets))
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);
        if (ticket)
                bytes_needed = ticket->bytes;

        if (bytes_needed > cur_free_bytes)
                bytes_needed -= cur_free_bytes;
        else
                bytes_needed = 0;
        spin_unlock(&space_info->lock);

        if (!bytes_needed)
                return 0;

        trans = btrfs_join_transaction(fs_info->extent_root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        /*
         * See if there is enough pinned space to make this reservation, or if
         * we have block groups that are going to be freed, allowing us to
         * possibly do a chunk allocation the next loop through.
         */
        if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
            __percpu_counter_compare(&space_info->total_bytes_pinned,
                                     bytes_needed,
                                     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
                goto commit;

        /*
         * See if there is some space in the delayed insertion reserve for this
         * reservation.  If the space_infos don't match (like for DATA or
         * SYSTEM) then just go enospc, reclaiming this space won't recover any
         * space to satisfy those reservations.
         */
        if (space_info != delayed_rsv->space_info)
                goto enospc;

        spin_lock(&delayed_rsv->lock);
        reclaim_bytes += delayed_rsv->reserved;
        spin_unlock(&delayed_rsv->lock);

        spin_lock(&delayed_refs_rsv->lock);
        reclaim_bytes += delayed_refs_rsv->reserved;
        spin_unlock(&delayed_refs_rsv->lock);

        spin_lock(&trans_rsv->lock);
        reclaim_bytes += trans_rsv->reserved;
        spin_unlock(&trans_rsv->lock);

        if (reclaim_bytes >= bytes_needed)
                goto commit;
        bytes_needed -= reclaim_bytes;

        if (__percpu_counter_compare(&space_info->total_bytes_pinned,
                                   bytes_needed,
                                   BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
                goto enospc;

commit:
        return btrfs_commit_transaction(trans);
enospc:
        btrfs_end_transaction(trans);
        return -ENOSPC;
}
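
/*
 * Editor's worked example for may_commit_transaction() (illustrative): the
 * first ticket wants 1MiB and the space_info has 256KiB free, leaving
 * bytes_needed = 768KiB.  If total_bytes_pinned is 2MiB, a commit unpins
 * more than enough and we commit.  If pinned space plus the reclaimable
 * delayed/transaction rsv bytes stay below 768KiB, we return -ENOSPC
 * rather than churn through a commit that cannot satisfy the ticket.
 */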

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
                       struct btrfs_space_info *space_info, u64 num_bytes,
                       enum btrfs_flush_state state, bool for_preempt)
{
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;

        switch (state) {
        case FLUSH_DELAYED_ITEMS_NR:
        case FLUSH_DELAYED_ITEMS:
                if (state == FLUSH_DELAYED_ITEMS_NR)
                        nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
                else
                        nr = -1;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_run_delayed_items_nr(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
                shrink_delalloc(fs_info, space_info, num_bytes,
                                state == FLUSH_DELALLOC_WAIT, for_preempt);
                break;
        case FLUSH_DELAYED_REFS_NR:
        case FLUSH_DELAYED_REFS:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                if (state == FLUSH_DELAYED_REFS_NR)
                        nr = calc_reclaim_items_nr(fs_info, num_bytes);
                else
                        nr = 0;
                btrfs_run_delayed_refs(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case ALLOC_CHUNK:
        case ALLOC_CHUNK_FORCE:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_chunk_alloc(trans,
                                btrfs_get_alloc_profile(fs_info, space_info->flags),
                                (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
                                        CHUNK_ALLOC_FORCE);
                btrfs_end_transaction(trans);
                if (ret > 0 || ret == -ENOSPC)
                        ret = 0;
                break;
        case RUN_DELAYED_IPUTS:
                /*
                 * If we have pending delayed iputs then we could free up a
                 * bunch of pinned space, so make sure we run the iputs before
                 * we do our pinned bytes check below.
                 */
                btrfs_run_delayed_iputs(fs_info);
                btrfs_wait_on_delayed_iputs(fs_info);
                break;
        case COMMIT_TRANS:
                ret = may_commit_transaction(fs_info, space_info);
                break;
        case FORCE_COMMIT_TRANS:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_commit_transaction(trans);
                break;
        default:
                ret = -ENOSPC;
                break;
        }

        trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
                                ret, for_preempt);
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
                                 struct btrfs_space_info *space_info)
{
        u64 used;
        u64 avail;
        u64 to_reclaim = space_info->reclaim_size;

        lockdep_assert_held(&space_info->lock);

        avail = calc_available_free_space(fs_info, space_info,
                                          BTRFS_RESERVE_FLUSH_ALL);
        used = btrfs_space_info_used(space_info, true);

        /*
         * We may be flushing because suddenly we have less space than we had
         * before, and now we're well over-committed based on our current free
         * space.  If that's the case add in our overage so we make sure to put
         * appropriate pressure on the flushing state machine.
         */
        if (space_info->total_bytes + avail < used)
                to_reclaim += used - (space_info->total_bytes + avail);

        return to_reclaim;
}
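
/*
 * Editor's worked example (illustrative numbers): reclaim_size, the sum of
 * all queued tickets, is 1MiB; total_bytes is 10GiB and avail is 1GiB, but
 * used is 11.5GiB because we overcommitted and then lost unallocated
 * space.  The 0.5GiB overage (11.5 - (10 + 1)) is added on, so to_reclaim
 * becomes 0.5GiB + 1MiB instead of just the ticketed bytes.
 */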

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *space_info)
{
        u64 global_rsv_size = fs_info->global_block_rsv.reserved;
        u64 ordered, delalloc;
        u64 thresh = div_factor_fine(space_info->total_bytes, 98);
        u64 used;

        /* If we're just plain full then async reclaim just slows us down. */
        if ((space_info->bytes_used + space_info->bytes_reserved +
             global_rsv_size) >= thresh)
                return false;

        /*
         * We have tickets queued, bail so we don't compete with the async
         * flushers.
         */
        if (space_info->reclaim_size)
                return false;

        /*
         * If we have over half of the free space occupied by reservations or
         * pinned then we want to start flushing.
         *
         * We do not do the traditional thing here, which is to say
         *
         *   if (used >= ((total_bytes + avail) / 2))
         *     return 1;
         *
         * because this doesn't quite work how we want.  If we had more than 50%
         * of the space_info used by bytes_used and we had 0 available we'd just
         * constantly run the background flusher.  Instead we want it to kick in
         * if our reclaimable space exceeds our clamped free space.
         *
         * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
         * the following:
         *
         * Amount of free space Minimum threshold       Maximum threshold
         *
         *        256GiB                     1GiB                  128GiB
         *        128GiB                   512MiB                   64GiB
         *         64GiB                   256MiB                   32GiB
         *         32GiB                   128MiB                   16GiB
         *         16GiB                    64MiB                    8GiB
         *
         * These are the range our thresholds will fall in, corresponding to how
         * much delalloc we need for the background flusher to kick in.
         */

        thresh = calc_available_free_space(fs_info, space_info,
                                           BTRFS_RESERVE_FLUSH_ALL);
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_readonly + global_rsv_size;
        if (used < space_info->total_bytes)
                thresh += space_info->total_bytes - used;
        thresh >>= space_info->clamp;

        used = space_info->bytes_pinned;

        /*
         * If we have more ordered bytes than delalloc bytes then we're either
         * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
         * around.  Preemptive flushing is only useful in that it can free up
         * space before tickets need to wait for things to finish.  In the case
         * of ordered extents, preemptively waiting on ordered extents gets us
         * nothing, if our reservations are tied up in ordered extents we'll
         * simply have to slow down writers by forcing them to wait on ordered
         * extents.
         *
         * In the case that ordered is larger than delalloc, only include the
         * block reserves that we would actually be able to directly reclaim
         * from.  In this case if we're heavy on metadata operations this will
         * clearly be heavy enough to warrant preemptive flushing.  In the case
         * of heavy DIO or ordered reservations, preemptive flushing will just
         * waste time and cause us to slow down.
         *
         * We want to make sure we truly are maxed out on ordered however, so
         * cut ordered in half, and if it's still higher than delalloc then we
         * can keep flushing.  This is to avoid the case where we start
         * flushing, and now delalloc == ordered and we stop preemptively
         * flushing when we could still have several gigs of delalloc to flush.
         */
        ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
        delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
        if (ordered >= delalloc)
                used += fs_info->delayed_refs_rsv.reserved +
                        fs_info->delayed_block_rsv.reserved;
        else
                used += space_info->bytes_may_use - global_rsv_size;

        return (used >= thresh && !btrfs_fs_closing(fs_info) &&
                !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
                                  struct btrfs_space_info *space_info,
                                  struct reserve_ticket *ticket)
{
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        u64 min_bytes;

        if (global_rsv->space_info != space_info)
                return false;

        spin_lock(&global_rsv->lock);
        min_bytes = div_factor(global_rsv->size, 1);
        if (global_rsv->reserved < min_bytes + ticket->bytes) {
                spin_unlock(&global_rsv->lock);
                return false;
        }
        global_rsv->reserved -= ticket->bytes;
        remove_ticket(space_info, ticket);
        ticket->bytes = 0;
        wake_up(&ticket->wait);
        space_info->tickets_id++;
        if (global_rsv->reserved < global_rsv->size)
                global_rsv->full = 0;
        spin_unlock(&global_rsv->lock);

        return true;
}
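
/*
 * Editor's note: div_factor(size, 1) is one tenth of the global rsv, so the
 * helper above only steals when at least 10% of the reserve would remain.
 * Illustration: size 512MiB, reserved 400MiB, ticket 16MiB -> 400MiB >=
 * 51.2MiB + 16MiB, so we steal; with only 60MiB reserved we would refuse.
 */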

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
                                   struct btrfs_space_info *space_info)
{
        struct reserve_ticket *ticket;
        u64 tickets_id = space_info->tickets_id;
        u64 first_ticket_bytes = 0;

        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
                __btrfs_dump_space_info(fs_info, space_info);
        }

        while (!list_empty(&space_info->tickets) &&
               tickets_id == space_info->tickets_id) {
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);

                if (ticket->steal &&
                    steal_from_global_rsv(fs_info, space_info, ticket))
                        return true;

                /*
                 * may_commit_transaction will avoid committing the transaction
                 * if it doesn't feel like the space reclaimed by the commit
                 * would result in the ticket succeeding.  However if we have a
                 * smaller ticket in the queue it may be small enough to be
                 * satisfied by committing the transaction, so if any
                 * subsequent ticket is smaller than the first ticket go ahead
                 * and send us back for another loop through the enospc flushing
                 * code.
                 */
                if (first_ticket_bytes == 0)
                        first_ticket_bytes = ticket->bytes;
                else if (first_ticket_bytes > ticket->bytes)
                        return true;

                if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
                        btrfs_info(fs_info, "failing ticket with %llu bytes",
                                   ticket->bytes);

                remove_ticket(space_info, ticket);
                ticket->error = -ENOSPC;
                wake_up(&ticket->wait);

                /*
                 * We're just throwing tickets away, so more flushing may not
                 * trip over btrfs_try_granting_tickets, so we need to call it
                 * here to see if we can make progress with the next ticket in
                 * the list.
                 */
                btrfs_try_granting_tickets(fs_info, space_info);
        }
        return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
        enum btrfs_flush_state flush_state;
        int commit_cycles = 0;
        u64 last_tickets_id;

        fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
        space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

        spin_lock(&space_info->lock);
        to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
        if (!to_reclaim) {
                space_info->flush = 0;
                spin_unlock(&space_info->lock);
                return;
        }
        last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);

        flush_state = FLUSH_DELAYED_ITEMS_NR;
        do {
                flush_space(fs_info, space_info, to_reclaim, flush_state, false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }
                to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
                                                              space_info);
                if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
                        last_tickets_id = space_info->tickets_id;
                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                        if (commit_cycles)
                                commit_cycles--;
                }

                /*
                 * We don't want to force a chunk allocation until we've tried
                 * pretty hard to reclaim space.  Think of the case where we
                 * freed up a bunch of space and so have a lot of pinned space
                 * to reclaim.  We would rather use that than possibly create an
                 * underutilized metadata chunk.  So if this is our first run
                 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
                 * commit the transaction.  If nothing has changed the next go
                 * around then we can force a chunk allocation.
                 */
                if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
                        flush_state++;

                if (flush_state > COMMIT_TRANS) {
                        commit_cycles++;
                        if (commit_cycles > 2) {
                                if (maybe_fail_all_tickets(fs_info, space_info)) {
                                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                                        commit_cycles--;
                                } else {
                                        space_info->flush = 0;
                                }
                        } else {
                                flush_state = FLUSH_DELAYED_ITEMS_NR;
                        }
                }
                spin_unlock(&space_info->lock);
        } while (flush_state <= COMMIT_TRANS);
}
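
/*
 * Editor's trace of the loop above (illustrative): a pass that frees
 * nothing walks FLUSH_DELAYED_ITEMS_NR through COMMIT_TRANS, bumps
 * commit_cycles and restarts from the first state; a pass where tickets_id
 * moved resets the state machine and credits a cycle back.  Only once
 * commit_cycles exceeds 2 do we give up and start failing tickets via
 * maybe_fail_all_tickets().
 */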

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_block_rsv *delayed_block_rsv;
        struct btrfs_block_rsv *delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv;
        struct btrfs_block_rsv *trans_rsv;
        int loops = 0;

        fs_info = container_of(work, struct btrfs_fs_info,
                               preempt_reclaim_work);
        space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        delayed_block_rsv = &fs_info->delayed_block_rsv;
        delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        global_rsv = &fs_info->global_block_rsv;
        trans_rsv = &fs_info->trans_block_rsv;

        spin_lock(&space_info->lock);
        while (need_preemptive_reclaim(fs_info, space_info)) {
                enum btrfs_flush_state flush;
                u64 delalloc_size = 0;
                u64 to_reclaim, block_rsv_size;
                u64 global_rsv_size = global_rsv->reserved;

                loops++;

                /*
                 * We don't have a precise counter for the metadata being
                 * reserved for delalloc, so we'll approximate it by subtracting
                 * out the block rsv's space from the bytes_may_use.  If that
                 * amount is higher than the individual reserves, then we can
                 * assume it's tied up in delalloc reservations.
                 */
                block_rsv_size = global_rsv_size +
                        delayed_block_rsv->reserved +
                        delayed_refs_rsv->reserved +
                        trans_rsv->reserved;
                if (block_rsv_size < space_info->bytes_may_use)
                        delalloc_size = space_info->bytes_may_use - block_rsv_size;
                spin_unlock(&space_info->lock);

                /*
                 * We don't want to include the global_rsv in our calculation,
                 * because that's space we can't touch.  Subtract it from the
                 * block_rsv_size for the next checks.
                 */
                block_rsv_size -= global_rsv_size;

                /*
                 * We really want to avoid flushing delalloc too much, as it
                 * could result in poor allocation patterns, so only flush it if
                 * it's larger than the rest of the pools combined.
                 */
                if (delalloc_size > block_rsv_size) {
                        to_reclaim = delalloc_size;
                        flush = FLUSH_DELALLOC;
                } else if (space_info->bytes_pinned >
                           (delayed_block_rsv->reserved +
                            delayed_refs_rsv->reserved)) {
                        to_reclaim = space_info->bytes_pinned;
                        flush = FORCE_COMMIT_TRANS;
                } else if (delayed_block_rsv->reserved >
                           delayed_refs_rsv->reserved) {
                        to_reclaim = delayed_block_rsv->reserved;
                        flush = FLUSH_DELAYED_ITEMS_NR;
                } else {
                        to_reclaim = delayed_refs_rsv->reserved;
                        flush = FLUSH_DELAYED_REFS_NR;
                }

                /*
                 * We don't want to reclaim everything, just a portion, so scale
                 * down the to_reclaim by 1/4.  If it takes us down to 0,
                 * reclaim 1 item's worth.
                 */
                to_reclaim >>= 2;
                if (!to_reclaim)
                        to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
                flush_space(fs_info, space_info, to_reclaim, flush, true);
                cond_resched();
                spin_lock(&space_info->lock);
        }

        /* We only went through once, back off our clamping. */
        if (loops == 1 && !space_info->reclaim_size)
                space_info->clamp = max(1, space_info->clamp - 1);
        trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
        spin_unlock(&space_info->lock);
}
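
/*
 * Editor's worked example for the preemptive loop (illustrative): if the
 * delayed item rsv holds 64MiB and is the largest pool, we pick
 * FLUSH_DELAYED_ITEMS_NR with to_reclaim = 64MiB >> 2 = 16MiB, enough to
 * relieve pressure without draining the pool and losing its batching.
 */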

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * FLUSH_DELAYED_REFS
 *   The above two cases generate delayed refs that will affect
 *   ->total_bytes_pinned.  However this counter can be inconsistent with
 *   reality if there are outstanding delayed refs.  This is because we adjust
 *   the counter based solely on the current set of delayed refs and disregard
 *   any on-disk state which might include more refs.  So for example, if we
 *   have an extent with 2 references, but we only drop 1, we'll see that there
 *   is a negative delayed ref count for the extent and assume that the space
 *   will be freed, and thus increase ->total_bytes_pinned.
 *
 *   Running the delayed refs gives us the actual real view of what will be
 *   freed at the transaction commit time.  This stage will not actually free
 *   space for us, it just makes sure that may_commit_transaction() has all of
 *   the information it needs to make the right decision.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by the previous
 *   two stages.  We will not commit the transaction if we don't think we're
 *   likely to satisfy our request, which means if our current free space +
 *   total_bytes_pinned < reservation we will not commit.  This is why the
 *   previous states are actually important, to make sure we know for sure
 *   whether committing the transaction will allow us to make progress.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
        FLUSH_DELALLOC_WAIT,
        RUN_DELAYED_IPUTS,
        FLUSH_DELAYED_REFS,
        COMMIT_TRANS,
        ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 last_tickets_id;
        enum btrfs_flush_state flush_state = 0;

        fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
        space_info = fs_info->data_sinfo;

        spin_lock(&space_info->lock);
        if (list_empty(&space_info->tickets)) {
                space_info->flush = 0;
                spin_unlock(&space_info->lock);
                return;
        }
        last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);

        while (!space_info->full) {
                flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }
                last_tickets_id = space_info->tickets_id;
                spin_unlock(&space_info->lock);
        }

        while (flush_state < ARRAY_SIZE(data_flush_states)) {
                flush_space(fs_info, space_info, U64_MAX,
                            data_flush_states[flush_state], false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }

                if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
                        last_tickets_id = space_info->tickets_id;
                        flush_state = 0;
                }

                if (flush_state >= ARRAY_SIZE(data_flush_states)) {
                        if (space_info->full) {
                                if (maybe_fail_all_tickets(fs_info, space_info))
                                        flush_state = 0;
                                else
                                        space_info->flush = 0;
                        } else {
                                flush_state = 0;
                        }
                }
                spin_unlock(&space_info->lock);
        }
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
        INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
        INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
        INIT_WORK(&fs_info->preempt_reclaim_work,
                  btrfs_preempt_reclaim_metadata_space);
}
1298
1299 static const enum btrfs_flush_state priority_flush_states[] = {
1300         FLUSH_DELAYED_ITEMS_NR,
1301         FLUSH_DELAYED_ITEMS,
1302         ALLOC_CHUNK,
1303 };
1304
1305 static const enum btrfs_flush_state evict_flush_states[] = {
1306         FLUSH_DELAYED_ITEMS_NR,
1307         FLUSH_DELAYED_ITEMS,
1308         FLUSH_DELAYED_REFS_NR,
1309         FLUSH_DELAYED_REFS,
1310         FLUSH_DELALLOC,
1311         FLUSH_DELALLOC_WAIT,
1312         ALLOC_CHUNK,
1313         COMMIT_TRANS,
1314 };
1315
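/*
 * Walk the given flush states once, synchronously in the caller's context
 * (no async worker), returning as soon as the ticket has been satisfied.
 */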
1316 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1317                                 struct btrfs_space_info *space_info,
1318                                 struct reserve_ticket *ticket,
1319                                 const enum btrfs_flush_state *states,
1320                                 int states_nr)
1321 {
1322         u64 to_reclaim;
1323         int flush_state;
1324
1325         spin_lock(&space_info->lock);
1326         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1327         if (!to_reclaim) {
1328                 spin_unlock(&space_info->lock);
1329                 return;
1330         }
1331         spin_unlock(&space_info->lock);
1332
1333         flush_state = 0;
1334         do {
1335                 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1336                             false);
1337                 flush_state++;
1338                 spin_lock(&space_info->lock);
1339                 if (ticket->bytes == 0) {
1340                         spin_unlock(&space_info->lock);
1341                         return;
1342                 }
1343                 spin_unlock(&space_info->lock);
1344         } while (flush_state < states_nr);
1345 }
1346
1347 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1348                                         struct btrfs_space_info *space_info,
1349                                         struct reserve_ticket *ticket)
1350 {
1351         while (!space_info->full) {
1352                 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1353                 spin_lock(&space_info->lock);
1354                 if (ticket->bytes == 0) {
1355                         spin_unlock(&space_info->lock);
1356                         return;
1357                 }
1358                 spin_unlock(&space_info->lock);
1359         }
1360 }
1361
1362 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1363                                 struct btrfs_space_info *space_info,
1364                                 struct reserve_ticket *ticket)
1366 {
1367         DEFINE_WAIT(wait);
1368         int ret = 0;
1369
1370         spin_lock(&space_info->lock);
1371         while (ticket->bytes > 0 && ticket->error == 0) {
1372                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1373                 if (ret) {
1374                         /*
1375                          * Delete us from the list. After we unlock the space
1376                          * info, we don't want the async reclaim job to reserve
1377                          * space for this ticket. If that happened, the
1378                          * ticket's task would not know that space was reserved
1379                          * despite getting an error, resulting in a space leak
1380                          * (bytes_may_use counter of our space_info).
1381                          */
1382                         remove_ticket(space_info, ticket);
1383                         ticket->error = -EINTR;
1384                         break;
1385                 }
1386                 spin_unlock(&space_info->lock);
1387
1388                 schedule();
1389
1390                 finish_wait(&ticket->wait, &wait);
1391                 spin_lock(&space_info->lock);
1392         }
1393         spin_unlock(&space_info->lock);
1394 }
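/*
 * The killable wait above follows the standard wait-queue idiom.  A minimal
 * sketch of the pattern, assuming a caller-provided condition cond and wait
 * queue wq, with no lock held (illustrative, not part of this file):
 *
 *	DEFINE_WAIT(wait);
 *
 *	while (!cond) {
 *		// a nonzero return means a fatal signal is pending
 *		if (prepare_to_wait_event(&wq, &wait, TASK_KILLABLE))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */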
1395
1396 /**
1397  * Do the appropriate flushing and waiting for a ticket
1398  *
1399  * @fs_info:    the filesystem
1400  * @space_info: space info for the reservation
1401  * @ticket:     ticket for the reservation
1402  * @start_ns:   timestamp when the reservation started
1403  * @orig_bytes: number of bytes originally reserved
1404  * @flush:      how much we can flush
1405  *
1406  * This does the work of figuring out how to flush for the ticket, waiting for
1407  * the reservation, and returning the appropriate error if there is one.
1408  */
1409 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1410                                  struct btrfs_space_info *space_info,
1411                                  struct reserve_ticket *ticket,
1412                                  u64 start_ns, u64 orig_bytes,
1413                                  enum btrfs_reserve_flush_enum flush)
1414 {
1415         int ret;
1416
1417         switch (flush) {
1418         case BTRFS_RESERVE_FLUSH_DATA:
1419         case BTRFS_RESERVE_FLUSH_ALL:
1420         case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1421                 wait_reserve_ticket(fs_info, space_info, ticket);
1422                 break;
1423         case BTRFS_RESERVE_FLUSH_LIMIT:
1424                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1425                                                 priority_flush_states,
1426                                                 ARRAY_SIZE(priority_flush_states));
1427                 break;
1428         case BTRFS_RESERVE_FLUSH_EVICT:
1429                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1430                                                 evict_flush_states,
1431                                                 ARRAY_SIZE(evict_flush_states));
1432                 break;
1433         case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1434                 priority_reclaim_data_space(fs_info, space_info, ticket);
1435                 break;
1436         default:
1437                 ASSERT(0);
1438                 break;
1439         }
1440
1441         spin_lock(&space_info->lock);
1442         ret = ticket->error;
1443         if (ticket->bytes || ticket->error) {
1444                 /*
1445                  * We were a priority ticket, so we need to delete ourselves
1446                  * from the list.  Because we could have other priority tickets
1447                  * behind us that require less space, run
1448                  * btrfs_try_granting_tickets() to see if their reservations can
1449                  * now be made.
1450                  */
1451                 if (!list_empty(&ticket->list)) {
1452                         remove_ticket(space_info, ticket);
1453                         btrfs_try_granting_tickets(fs_info, space_info);
1454                 }
1455
1456                 if (!ret)
1457                         ret = -ENOSPC;
1458         }
1459         spin_unlock(&space_info->lock);
1460         ASSERT(list_empty(&ticket->list));
1461         /*
1462          * Check that we can't have an error set if the reservation succeeded,
1463          * as that would confuse tasks and lead them to error out without
1464          * releasing reserved space (if an error happens the expectation is that
1465          * space wasn't reserved at all).
1466          */
1467         ASSERT(!(ticket->bytes == 0 && ticket->error));
1468         trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1469                                    start_ns, flush, ticket->error);
1470         return ret;
1471 }
1472
1473 /*
1474  * This returns true if this flush state will go through the ordinary flushing
1475  * code.
1476  */
1477 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1478 {
1479         return  (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1480                 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1481 }
1482
1483 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1484                                        struct btrfs_space_info *space_info)
1485 {
1486         u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1487         u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1488
1489         /*
1490          * If we're heavy on ordered operations then clamping won't help us.  We
1491          * need to clamp specifically to keep up with dirtying buffered
1492          * writers, because there's not a 1:1 correlation of writing delalloc
1493          * and freeing space, like there is with flushing delayed refs or
1494          * delayed nodes.  If we're already more ordered than delalloc then
1495          * we're keeping up, otherwise we aren't and should probably clamp.
1496          */
1497         if (ordered < delalloc)
1498                 space_info->clamp = min(space_info->clamp + 1, 8);
1499 }
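/*
 * Worked example (sketch): with clamp at 3, 1GiB of ordered bytes and 4GiB
 * of delalloc bytes, ordered < delalloc, so clamp ratchets up to 4 (capped
 * at 8).  A larger clamp tightens the threshold for preemptive flushing;
 * see the "Clamp down on the threshold" comment in __reserve_bytes() below.
 */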
1500
1501 /**
1502  * Try to reserve bytes from the block_rsv's space
1503  *
1504  * @fs_info:    the filesystem
1505  * @space_info: space info we want to allocate from
1506  * @orig_bytes: number of bytes we want
1507  * @flush:      whether or not we can flush to make our reservation
1508  *
1509  * This will reserve orig_bytes number of bytes from the space info associated
1510  * with the block_rsv.  If there is not enough space it will make an attempt to
1511  * flush out space to make room.  It will do this by flushing delalloc if
1512  * possible or committing the transaction.  If @flush is
1513  * BTRFS_RESERVE_NO_FLUSH then no attempt to regain reservations will be made
1514  * and this will fail if there is not enough space already.
1515  */
1516 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1517                            struct btrfs_space_info *space_info, u64 orig_bytes,
1518                            enum btrfs_reserve_flush_enum flush)
1519 {
1520         struct work_struct *async_work;
1521         struct reserve_ticket ticket;
1522         u64 start_ns = 0;
1523         u64 used;
1524         int ret = 0;
1525         bool pending_tickets;
1526
1527         ASSERT(orig_bytes);
1528         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
1529
1530         if (flush == BTRFS_RESERVE_FLUSH_DATA)
1531                 async_work = &fs_info->async_data_reclaim_work;
1532         else
1533                 async_work = &fs_info->async_reclaim_work;
1534
1535         spin_lock(&space_info->lock);
1536         ret = -ENOSPC;
1537         used = btrfs_space_info_used(space_info, true);
1538
1539         /*
1540          * We don't want NO_FLUSH allocations to jump ahead of everybody; they can
1541          * generally handle ENOSPC in a different way, so treat them the same as
1542          * normal flushers when it comes to skipping pending tickets.
1543          */
1544         if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1545                 pending_tickets = !list_empty(&space_info->tickets) ||
1546                         !list_empty(&space_info->priority_tickets);
1547         else
1548                 pending_tickets = !list_empty(&space_info->priority_tickets);
1549
1550         /*
1551          * Carry on if we have enough space (short-circuit) OR call
1552          * btrfs_can_overcommit() to ensure we can overcommit to continue.
1553          */
1554         if (!pending_tickets &&
1555             ((used + orig_bytes <= space_info->total_bytes) ||
1556              btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1557                 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1558                                                       orig_bytes);
1559                 ret = 0;
1560         }
1561
1562         /*
1563          * If we couldn't make a reservation then setup our reservation ticket
1564          * and kick the async worker if it's not already running.
1565          *
1566          * If we are a priority flusher then we just need to add our ticket to
1567          * the list and we will do our own flushing further down.
1568          */
1569         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
1570                 ticket.bytes = orig_bytes;
1571                 ticket.error = 0;
1572                 space_info->reclaim_size += ticket.bytes;
1573                 init_waitqueue_head(&ticket.wait);
1574                 ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1575                 if (trace_btrfs_reserve_ticket_enabled())
1576                         start_ns = ktime_get_ns();
1577
1578                 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1579                     flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1580                     flush == BTRFS_RESERVE_FLUSH_DATA) {
1581                         list_add_tail(&ticket.list, &space_info->tickets);
1582                         if (!space_info->flush) {
1583                                 /*
1584                                  * We were forced to add a reserve ticket, so
1585                                  * our preemptive flushing is unable to keep
1586                                  * up.  Clamp down on the threshold for the
1587                                  * preemptive flushing in order to keep up with
1588                                  * the workload.
1589                                  */
1590                                 maybe_clamp_preempt(fs_info, space_info);
1591
1592                                 space_info->flush = 1;
1593                                 trace_btrfs_trigger_flush(fs_info,
1594                                                           space_info->flags,
1595                                                           orig_bytes, flush,
1596                                                           "enospc");
1597                                 queue_work(system_unbound_wq, async_work);
1598                         }
1599                 } else {
1600                         list_add_tail(&ticket.list,
1601                                       &space_info->priority_tickets);
1602                 }
1603         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1604                 used += orig_bytes;
1605                 /*
1606                  * We will do the space reservation dance during log replay,
1607                  * which means we won't have fs_info->fs_root set, so don't do
1608                  * the async reclaim as we will panic.
1609                  */
1610                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1611                     !work_busy(&fs_info->preempt_reclaim_work) &&
1612                     need_preemptive_reclaim(fs_info, space_info)) {
1613                         trace_btrfs_trigger_flush(fs_info, space_info->flags,
1614                                                   orig_bytes, flush, "preempt");
1615                         queue_work(system_unbound_wq,
1616                                    &fs_info->preempt_reclaim_work);
1617                 }
1618         }
1619         spin_unlock(&space_info->lock);
1620         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
1621                 return ret;
1622
1623         return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1624                                      orig_bytes, flush);
1625 }
1626
1627 /**
1628  * Try to reserve metadata bytes from the block_rsv's space
1629  *
1630  * @root:       the root we're allocating for
1631  * @block_rsv:  block_rsv we're allocating for
1632  * @orig_bytes: number of bytes we want
1633  * @flush:      whether or not we can flush to make our reservation
1634  *
1635  * This will reserve orig_bytes number of bytes from the space info associated
1636  * with the block_rsv.  If there is not enough space it will make an attempt to
1637  * flush out space to make room.  It will do this by flushing delalloc if
1638  * possible or committing the transaction.  If @flush is
1639  * BTRFS_RESERVE_NO_FLUSH then no attempt to regain reservations will be made
1640  * and this will fail if there is not enough space already.
1641  */
1642 int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
1643                                  struct btrfs_block_rsv *block_rsv,
1644                                  u64 orig_bytes,
1645                                  enum btrfs_reserve_flush_enum flush)
1646 {
1647         struct btrfs_fs_info *fs_info = root->fs_info;
1648         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
1649         int ret;
1650
1651         ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1652         if (ret == -ENOSPC &&
1653             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
1654                 if (block_rsv != global_rsv &&
1655                     !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
1656                         ret = 0;
1657         }
1658         if (ret == -ENOSPC) {
1659                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1660                                               block_rsv->space_info->flags,
1661                                               orig_bytes, 1);
1662
1663                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1664                         btrfs_dump_space_info(fs_info, block_rsv->space_info,
1665                                               orig_bytes, 0);
1666         }
1667         return ret;
1668 }
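/*
 * Sketch of a typical metadata reservation at a call site (illustrative,
 * not from this file), sized with the btrfs_calc*_size() helpers:
 *
 *	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret;
 *
 *	ret = btrfs_reserve_metadata_bytes(root, rsv, num_bytes,
 *					   BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;	// -ENOSPC if flushing could not make room
 */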
1669
1670 /**
1671  * Try to reserve data bytes for an allocation
1672  *
1673  * @fs_info: the filesystem
1674  * @bytes:   number of bytes we need
1675  * @flush:   how we are allowed to flush
1676  *
1677  * This will reserve bytes from the data space info.  If there is not enough
1678  * space then we will attempt to flush space as specified by flush.
1679  */
1680 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1681                              enum btrfs_reserve_flush_enum flush)
1682 {
1683         struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1684         int ret;
1685
1686         ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1687                flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
1688         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1689
1690         ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1691         if (ret == -ENOSPC) {
1692                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1693                                               data_sinfo->flags, bytes, 1);
1694                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1695                         btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1696         }
1697         return ret;
1698 }
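/*
 * Sketch of a data reservation (illustrative, not from this file): callers
 * pass a sectorsize-aligned byte count and are responsible for releasing
 * the reservation if the space ends up unused:
 *
 *	u64 len = round_up(num_bytes, fs_info->sectorsize);
 *	int ret = btrfs_reserve_data_bytes(fs_info, len,
 *					   BTRFS_RESERVE_FLUSH_DATA);
 */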