// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, [email protected]
 * Written by Alex Tomas <[email protected]>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near the
 * specified goal (block) value.
 *
 * During the initialization phase of the allocator we decide to use either
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * use group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that small files are placed closer together on the disk.
 *
 * In the first stage, the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used based on the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that
 * we don't modify the values associated with it except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used by checking whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and the buddy information, stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks, so it can hold information regarding groups_per_page,
 * which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a set of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory group
 * info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos of
 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty groups
 *    so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups which have the largest_free_order
 * >= the order of the request. We directly look at the largest free order list
 * in the data structure (1) above where largest_free_order = order of the
 * request. If that list is empty, we look at the remaining lists in increasing
 * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED
 * lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the
 * average fragment size > request size. So, we look up a group whose average
 * fragment size is just above or equal to the request size using our average
 * fragment size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has an avg
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal length
 * and then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a
 * big enough average fragment size. This increases the chances of finding a
 * suitable block group in O(1) time and results in faster allocation at the
 * cost of reduced size of allocation.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR_POWER2_ALIGNED
 * and CR_GOAL_LEN_FAST phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks of
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked to determine whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups mballoc should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both preallocation spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in the prealloc
 * space getting filled. The prealloc space is then used for
 * subsequent requests.
 */
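
/*
 * A sketch of the size-based policy above (illustrative only; the real
 * decision is made in ext4_mb_group_or_file(), and mb_use_group_pa_example()
 * is a hypothetical helper, not used by the allocator): files whose size,
 * in blocks, stays below s_mb_stream_request get the per-CPU locality group
 * preallocation, larger files get inode preallocation.
 */
static inline bool mb_use_group_pa_example(loff_t alloc_size_blocks,
                                           loff_t isize_blocks,
                                           unsigned int stream_request)
{
        loff_t size = max(alloc_size_blocks, isize_blocks);

        return size < (loff_t)stream_request;
}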

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 *  to keep it simple, we don't use block numbers, instead we count number of
 *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                       buddy = on-disk + PAs
 *  - new PA:                           buddy += N; PA = N
 *  - use inode PA:                     on-disk += N; PA -= N
 *  - discard inode PA                  buddy -= on-disk - PA; PA = 0
 *  - use locality group PA             on-disk += N; PA -= N
 *  - discard locality group PA         buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual used
 *        bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
 *
 */
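
/*
 * The accounting rules above in miniature (plain counters, no locking;
 * mb_accounting_example() is an illustrative stand-in, not allocator code):
 * the invariant "used in buddy == used on disk + reserved in PAs" holds
 * across every operation.
 */
static inline void mb_accounting_example(void)
{
        unsigned int ondisk = 100, pa = 0, buddy;

        buddy = ondisk + pa;            /* init buddy: buddy = on-disk + PAs */
        buddy += 8; pa += 8;            /* new PA: buddy += N; PA = N */
        ondisk += 3; pa -= 3;           /* use inode PA: on-disk += N; PA -= N */
        buddy -= pa; pa = 0;            /* discard locality group PA */
        WARN_ON(buddy != ondisk + pa);  /* all structures agree again */
}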

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *  - cr_power2_aligned lists lock      (cr_power2_aligned)
 *  - cr_goal_len_fast lists lock       (cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
        "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
        "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
        "ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                               ext4_group_t group, enum criteria cr);

static int ext4_try_to_trim_range(struct super_block *sb,
                struct ext4_buddy *e4b, ext4_grpblk_t start,
                ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter is as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on the local cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
        int __cpu;
        u64 __seq = 0;

        for_each_possible_cpu(__cpu)
                __seq += per_cpu(discard_pa_seq, __cpu);
        return __seq;
}
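
/*
 * A simplified sketch of how the counter above is consumed (the real retry
 * logic lives in ext4_mb_new_blocks() and
 * ext4_mb_discard_preallocations_should_retry(); this helper is illustrative
 * only): if the summed sequence moved since it was sampled, some blocks were
 * freed or PAs discarded in the meantime, so a failed allocation is worth
 * retrying.
 */
static inline bool mb_discard_seq_advanced_example(u64 sampled_seq)
{
        return ext4_get_discard_pa_seq_sum() != sampled_seq;
}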

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
        *bit += ((unsigned long) addr & 7UL) << 3;
        addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
        *bit += ((unsigned long) addr & 3UL) << 3;
        addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
        return addr;
}
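
/*
 * Worked example for the helper above (mb_addr_fixup_example() is a
 * hypothetical helper; assumes BITS_PER_LONG == 64): a byte-granular
 * address is folded into an aligned address plus a larger bit offset.
 */
static inline void mb_addr_fixup_example(void)
{
        unsigned long buf[2] = { 0 };
        void *addr = (char *)buf + 5;   /* unaligned by 5 bytes */
        int bit = 2;

        addr = mb_correct_addr_and_bit(&bit, addr);
        /* now addr == buf and bit == 42 (= 2 + 5 * 8) */
        WARN_ON(addr != (void *)buf || bit != 42);
}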

static inline int mb_test_bit(int bit, void *addr)
{
        /*
         * ext4_test_bit on architectures like powerpc
         * needs an unsigned long aligned address
         */
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
        char *bb;

        BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
        BUG_ON(max == NULL);

        if (order > e4b->bd_blkbits + 1) {
                *max = 0;
                return NULL;
        }

        /* at order 0 we see each particular block */
        if (order == 0) {
                *max = 1 << (e4b->bd_blkbits + 3);
                return e4b->bd_bitmap;
        }

        bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
        *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

        return bb;
}
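
/*
 * Shape of the buddy arrays returned above, for illustration
 * (mb_order_bits_example() is a hypothetical helper): order 0 aliases the
 * block bitmap with (blocksize << 3) bits, and every higher order tracks
 * half as many bits, matching how s_mb_maxs[] is filled in at mount time.
 */
static inline int mb_order_bits_example(int blkbits, int order)
{
        return (1 << (blkbits + 3)) >> order;
}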

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                           int first, int count)
{
        int i;
        struct super_block *sb = e4b->bd_sb;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;

                        blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing block already freed "
                                              "(bit %u)",
                                              first + i);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
        int i;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
                mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
                unsigned char *b1, *b2;
                int i;
                b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
                b2 = (unsigned char *) bitmap;
                for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
                        if (b1[i] != b2[i]) {
                                ext4_msg(e4b->bd_sb, KERN_ERR,
                                         "corruption in group %u "
                                         "at byte %u(%u): %x in copy != %x "
                                         "on disk/prealloc",
                                         e4b->bd_group, i, i * 8, b1[i], b2[i]);
                                BUG();
                        }
                }
        }
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        struct buffer_head *bh;

        grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
        if (!grp->bb_bitmap)
                return;

        bh = ext4_read_block_bitmap(sb, group);
        if (IS_ERR_OR_NULL(bh)) {
                kfree(grp->bb_bitmap);
                grp->bb_bitmap = NULL;
                return;
        }

        memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
        put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
                                struct ext4_buddy *e4b, int first, int count)
{
        return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
                                                int first, int count)
{
        return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)                                         \
do {                                                                    \
        if (!(assert)) {                                                \
                printk(KERN_EMERG                                       \
                        "Assertion failure in %s() at %s:%d: \"%s\"\n", \
                        function, file, line, # assert);                \
                BUG();                                                  \
        }                                                               \
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
{
        struct super_block *sb = e4b->bd_sb;
        int order = e4b->bd_blkbits + 1;
        int max;
        int max2;
        int i;
        int j;
        int k;
        int count;
        struct ext4_group_info *grp;
        int fragments = 0;
        int fstart;
        struct list_head *cur;
        void *buddy;
        void *buddy2;

        if (e4b->bd_info->bb_check_counter++ % 10)
                return 0;

        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
                MB_CHECK_ASSERT(buddy);
                buddy2 = mb_find_buddy(e4b, order - 1, &max2);
                MB_CHECK_ASSERT(buddy2);
                MB_CHECK_ASSERT(buddy != buddy2);
                MB_CHECK_ASSERT(max * 2 == max2);

                count = 0;
                for (i = 0; i < max; i++) {

                        if (mb_test_bit(i, buddy)) {
                                /* only single bit in buddy2 may be 0 */
                                if (!mb_test_bit(i << 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit((i<<1)+1, buddy2));
                                }
                                continue;
                        }

                        /* both bits in buddy2 must be 1 */
                        MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
                        MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

                        for (j = 0; j < (1 << order); j++) {
                                k = (i * (1 << order)) + j;
                                MB_CHECK_ASSERT(
                                        !mb_test_bit(k, e4b->bd_bitmap));
                        }
                        count++;
                }
                MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
                order--;
        }

        fstart = -1;
        buddy = mb_find_buddy(e4b, 0, &max);
        for (i = 0; i < max; i++) {
                if (!mb_test_bit(i, buddy)) {
                        MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
                        if (fstart == -1) {
                                fragments++;
                                fstart = i;
                        }
                        continue;
                }
                fstart = -1;
                /* check used bits only */
                for (j = 0; j < e4b->bd_blkbits + 1; j++) {
                        buddy2 = mb_find_buddy(e4b, j, &max2);
                        k = i >> j;
                        MB_CHECK_ASSERT(k < max2);
                        MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
                }
        }
        MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

        grp = ext4_get_group_info(sb, e4b->bd_group);
        if (!grp)
                return 0;
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
                pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
        return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
                                        __FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks with power of 2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
                                void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
                                        struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t min;
        ext4_grpblk_t max;
        ext4_grpblk_t chunk;
        unsigned int border;

        BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

        border = 2 << sb->s_blocksize_bits;

        while (len > 0) {
                /* find how many blocks can be covered from this position */
                max = ffs(first | border) - 1;

                /* find how many blocks of power 2 we need to mark */
                min = fls(len) - 1;

                if (max < min)
                        min = max;
                chunk = 1 << min;

                /* mark multiblock chunks only */
                grp->bb_counters[min]++;
                if (min > 0)
                        mb_clear_bit(first >> min,
                                     buddy + sbi->s_mb_offsets[min]);

                len -= chunk;
                first += chunk;
        }
}
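
/*
 * Worked example of the chunking above (mb_chunk_len_example() is a
 * hypothetical helper; assumes first != 0 and elides the border cap at group
 * size): marking first = 5, len = 11 free splits into the naturally aligned
 * power-of-two chunks 1@5, 2@6 and 8@8 -- each chunk is limited both by the
 * alignment of its start (ffs) and by the remaining length (fls).
 */
static inline ext4_grpblk_t mb_chunk_len_example(ext4_grpblk_t first,
                                                 ext4_grpblk_t len)
{
        int max = ffs(first) - 1;       /* alignment limit */
        int min = fls(len) - 1;         /* length limit */

        return 1 << (max < min ? max : min);
}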

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
        int order;

        /*
         * We don't bother with special lists for groups whose free extents
         * are only 1 block long, nor for completely empty groups.
         */
        order = fls(len) - 2;
        if (order < 0)
                return 0;
        if (order == MB_NUM_ORDERS(sb))
                order--;
        return order;
}
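
/*
 * The resulting bucket map, for illustration (mb_frag_bucket_example() is a
 * hypothetical helper without the MB_NUM_ORDERS() clamp): average lengths
 * 1-3 land in list 0, 4-7 in list 1, 8-15 in list 2, and so on, with the
 * top order folded down by one so the array stays MB_NUM_ORDERS(sb) long.
 */
static inline int mb_frag_bucket_example(ext4_grpblk_t avg_len)
{
        return max(fls(avg_len) - 2, 0);
}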

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int new_order;

        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
                return;

        new_order = mb_avg_fragment_size_order(sb,
                                        grp->bb_free / grp->bb_fragments);
        if (new_order == grp->bb_avg_fragment_size_order)
                return;

        if (grp->bb_avg_fragment_size_order != -1) {
                write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
                list_del(&grp->bb_avg_fragment_size_node);
                write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        }
        grp->bb_avg_fragment_size_order = new_order;
        write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        list_add_tail(&grp->bb_avg_fragment_size_node,
                &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
        write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
                        enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *iter;
        int i;

        if (ac->ac_status == AC_STATUS_FOUND)
                return;

        if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
                atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);

        for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                if (list_empty(&sbi->s_mb_largest_free_orders[i]))
                        continue;
                read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
                        read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                        continue;
                }
                list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
                                    bb_largest_free_order_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
                        if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
                                *group = iter->bb_group;
                                ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
                                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                                return;
                        }
                }
                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
        }

        /* Increment cr and search again if no group is found */
        *new_cr = CR_GOAL_LEN_FAST;
}

/*
 * Find a suitable group of given order from the average fragments list.
 */
static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
        rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
        struct ext4_group_info *grp = NULL, *iter;
        enum criteria cr = ac->ac_criteria;

        if (list_empty(frag_list))
                return NULL;
        read_lock(frag_list_lock);
        if (list_empty(frag_list)) {
                read_unlock(frag_list_lock);
                return NULL;
        }
        list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
                if (sbi->s_mb_stats)
                        atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
                if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
                        grp = iter;
                        break;
                }
        }
        read_unlock(frag_list_lock);
        return grp;
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
        int i;

        if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
        }

        for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
             i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
                if (grp) {
                        *group = grp->bb_group;
                        ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
                        return;
                }
        }

        /*
         * CR_BEST_AVAIL_LEN works based on the concept that we have
         * a larger normalized goal len request which can be trimmed to
         * a smaller goal len such that it can still satisfy the original
         * request len. However, allocation requests for non-regular
         * files never get normalized.
         * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
         */
        if (ac->ac_flags & EXT4_MB_HINT_DATA)
                *new_cr = CR_BEST_AVAIL_LEN;
        else
                *new_cr = CR_GOAL_LEN_SLOW;
}

/*
 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
 * order we have and proactively trim the goal request length to that order to
 * find a suitable group faster.
 *
 * This optimizes allocation speed at the cost of slightly reduced
 * preallocations. However, we make sure that we don't trim the request too
 * much and fall to CR_GOAL_LEN_SLOW in that case.
 */
static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
        int i, order, min_order;
        unsigned long num_stripe_clusters = 0;

        if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
        }

        /*
         * mb_avg_fragment_size_order() returns order in a way that makes
         * retrieving back the length using (1 << order) inaccurate. Hence, use
         * fls() instead since we need to know the actual length while modifying
         * goal length.
         */
        order = fls(ac->ac_g_ex.fe_len) - 1;
        min_order = order - sbi->s_mb_best_avail_max_trim_order;
        if (min_order < 0)
                min_order = 0;

        if (sbi->s_stripe > 0) {
                /*
                 * We assume that stripe size is always a multiple of the
                 * cluster ratio; otherwise __ext4_fill_super() exits early.
                 */
                num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
                if (1 << min_order < num_stripe_clusters)
                        /*
                         * We consider 1 order less because later we round
                         * up the goal len to num_stripe_clusters
                         */
                        min_order = fls(num_stripe_clusters) - 1;
        }

        if (1 << min_order < ac->ac_o_ex.fe_len)
                min_order = fls(ac->ac_o_ex.fe_len);

        for (i = order; i >= min_order; i--) {
                int frag_order;
                /*
                 * Scale down goal len to make sure we find something
                 * in the free fragments list. Basically, reduce
                 * preallocations.
                 */
                ac->ac_g_ex.fe_len = 1 << i;

                if (num_stripe_clusters > 0) {
                        /*
                         * Try to round up the adjusted goal length to
                         * stripe size (in cluster units) multiple for
                         * efficiency.
                         */
                        ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
                                                     num_stripe_clusters);
                }

                frag_order = mb_avg_fragment_size_order(ac->ac_sb,
                                                        ac->ac_g_ex.fe_len);

                grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
                if (grp) {
                        *group = grp->bb_group;
                        ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
                        return;
                }
        }

        /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
        ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
        *new_cr = CR_GOAL_LEN_SLOW;
}
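
/*
 * Trimming example (illustrative numbers; mb_trim_example() is not allocator
 * code): for a normalized goal of 1000 clusters and a max trim order of 3,
 * the loop above retries goals of 512, 256, 128 and 64 clusters, never
 * dropping below the original request length.
 */
static inline void mb_trim_example(void)
{
        int goal = 1000, orig = 50, trim = 3;
        int order = fls(goal) - 1;              /* 9 */
        int min_order = max(order - trim, 0);   /* 6 */
        int i;

        for (i = order; i >= min_order; i--) {
                int candidate = 1 << i;         /* 512, 256, 128, 64 */

                if (candidate < orig)           /* keep covering the request */
                        break;
        }
}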

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
        if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
                return 0;
        if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
                return 0;
        if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
                return 0;
        return 1;
}

/*
 * Return the next linear group for allocation. If linear traversal should not
 * be performed, this function just returns the same group.
 */
static ext4_group_t
next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
                  ext4_group_t ngroups)
{
        if (!should_optimize_scan(ac))
                goto inc_and_return;

        if (ac->ac_groups_linear_remaining) {
                ac->ac_groups_linear_remaining--;
                goto inc_and_return;
        }

        return group;
inc_and_return:
        /*
         * Artificially restricted ngroups for non-extent
         * files makes group > ngroups possible on first loop.
         */
        return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            an output, this field indicates the next group that should be
 *            used as determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        *new_cr = ac->ac_criteria;

        if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
                *group = next_linear_group(ac, *group, ngroups);
                return;
        }

        if (*new_cr == CR_POWER2_ALIGNED) {
                ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups);
        } else if (*new_cr == CR_GOAL_LEN_FAST) {
                ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups);
        } else if (*new_cr == CR_BEST_AVAIL_LEN) {
                ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups);
        } else {
                /*
                 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
                 * bb_free. But until that happens, we should never come here.
                 */
                WARN_ON(1);
        }
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;

        for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
                if (grp->bb_counters[i] > 0)
                        break;
        /* No need to move between order lists? */
        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
            i == grp->bb_largest_free_order) {
                grp->bb_largest_free_order = i;
                return;
        }

        if (grp->bb_largest_free_order >= 0) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_del_init(&grp->bb_largest_free_order_node);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
        grp->bb_largest_free_order = i;
        if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_add_tail(&grp->bb_largest_free_order_node,
                      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
                            void *buddy, void *bitmap, ext4_group_t group,
                            struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
        ext4_grpblk_t first;
        ext4_grpblk_t len;
        unsigned free = 0;
        unsigned fragments = 0;
        unsigned long long period = get_cycles();

        /* initialize buddy from bitmap which is an aggregation
         * of on-disk bitmap and preallocations */
        i = mb_find_next_zero_bit(bitmap, max, 0);
        grp->bb_first_free = i;
        while (i < max) {
                fragments++;
                first = i;
                i = mb_find_next_bit(bitmap, max, i);
                len = i - first;
                free += len;
                if (len > 1)
                        ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
                else
                        grp->bb_counters[0]++;
                if (i < max)
                        i = mb_find_next_zero_bit(bitmap, max, i);
        }
        grp->bb_fragments = fragments;

        if (free != grp->bb_free) {
                ext4_grp_locked_error(sb, group, 0, 0,
                                      "block bitmap and bg descriptor "
                                      "inconsistent: %u vs %u free clusters",
                                      free, grp->bb_free);
                /*
                 * If we intend to continue, we consider group descriptor
                 * corrupt and update bb_free using bitmap value
                 */
                grp->bb_free = free;
                ext4_mark_group_bitmap_corrupted(sb, group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
        }
        mb_set_largest_free_order(sb, grp);
        mb_update_avg_fragment_size(sb, grp);

        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

        period = get_cycles() - period;
        atomic_inc(&sbi->s_mb_buddies_generated);
        atomic64_add(period, &sbi->s_mb_generation_time);
}
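
/*
 * The free-extent walk above, distilled (illustrative; the real function
 * also maintains bb_counters[] and runs consistency checks): alternating
 * "next zero bit" / "next set bit" scans visit every free extent exactly
 * once, which is how both free and fragments are derived.
 */
static inline unsigned int mb_count_free_example(void *bitmap, int max)
{
        unsigned int free = 0;
        int i = mb_find_next_zero_bit(bitmap, max, 0);

        while (i < max) {
                int first = i;

                i = mb_find_next_bit(bitmap, max, i);
                free += i - first;              /* one fragment of i - first */
                if (i < max)
                        i = mb_find_next_zero_bit(bitmap, max, i);
        }
        return free;
}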

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information, stored
 * in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
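
/*
 * Locating a group in the cache, for illustration (the same arithmetic is
 * used when loading buddies; mb_cache_position_example() is a hypothetical
 * helper): group @group's bitmap lives in block 2 * group of the cache
 * inode and its buddy in the following block.
 */
static inline void mb_cache_position_example(ext4_group_t group,
                                             int blocks_per_page,
                                             pgoff_t *pnum, int *poff)
{
        int block = group * 2;                  /* bitmap block; buddy is +1 */

        *pnum = block / blocks_per_page;        /* page index in cache inode */
        *poff = block % blocks_per_page;        /* block offset within page */
}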
1255
1256 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1257 {
1258         ext4_group_t ngroups;
1259         unsigned int blocksize;
1260         int blocks_per_page;
1261         int groups_per_page;
1262         int err = 0;
1263         int i;
1264         ext4_group_t first_group, group;
1265         int first_block;
1266         struct super_block *sb;
1267         struct buffer_head *bhs;
1268         struct buffer_head **bh = NULL;
1269         struct inode *inode;
1270         char *data;
1271         char *bitmap;
1272         struct ext4_group_info *grinfo;
1273
1274         inode = page->mapping->host;
1275         sb = inode->i_sb;
1276         ngroups = ext4_get_groups_count(sb);
1277         blocksize = i_blocksize(inode);
1278         blocks_per_page = PAGE_SIZE / blocksize;
1279
1280         mb_debug(sb, "init page %lu\n", page->index);
1281
1282         groups_per_page = blocks_per_page >> 1;
1283         if (groups_per_page == 0)
1284                 groups_per_page = 1;
1285
1286         /* allocate buffer_heads to read bitmaps */
1287         if (groups_per_page > 1) {
1288                 i = sizeof(struct buffer_head *) * groups_per_page;
1289                 bh = kzalloc(i, gfp);
1290                 if (bh == NULL)
1291                         return -ENOMEM;
1292         } else
1293                 bh = &bhs;
1294
1295         first_group = page->index * blocks_per_page / 2;
1296
1297         /* read all groups the page covers into the cache */
1298         for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1299                 if (group >= ngroups)
1300                         break;
1301
1302                 grinfo = ext4_get_group_info(sb, group);
1303                 if (!grinfo)
1304                         continue;
1305                 /*
1306                  * If page is uptodate then we came here after online resize
1307                  * which added some new uninitialized group info structs, so
1308                  * we must skip all initialized uptodate buddies on the page,
1309                  * which may be currently in use by an allocating task.
1310                  */
1311                 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1312                         bh[i] = NULL;
1313                         continue;
1314                 }
1315                 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1316                 if (IS_ERR(bh[i])) {
1317                         err = PTR_ERR(bh[i]);
1318                         bh[i] = NULL;
1319                         goto out;
1320                 }
1321                 mb_debug(sb, "read bitmap for group %u\n", group);
1322         }
1323
1324         /* wait for I/O completion */
1325         for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1326                 int err2;
1327
1328                 if (!bh[i])
1329                         continue;
1330                 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1331                 if (!err)
1332                         err = err2;
1333         }
1334
1335         first_block = page->index * blocks_per_page;
1336         for (i = 0; i < blocks_per_page; i++) {
1337                 group = (first_block + i) >> 1;
1338                 if (group >= ngroups)
1339                         break;
1340
1341                 if (!bh[group - first_group])
1342                         /* skip initialized uptodate buddy */
1343                         continue;
1344
1345                 if (!buffer_verified(bh[group - first_group]))
1346                         /* Skip faulty bitmaps */
1347                         continue;
1348                 err = 0;
1349
1350                 /*
1351                  * data carries information regarding this
1352                  * particular group in the format specified
1353                  * above
1354                  *
1355                  */
1356                 data = page_address(page) + (i * blocksize);
1357                 bitmap = bh[group - first_group]->b_data;
1358
1359                 /*
1360                  * We place the buddy block and bitmap block
1361                  * close together
1362                  */
1363                 grinfo = ext4_get_group_info(sb, group);
1364                 if (!grinfo) {
1365                         err = -EFSCORRUPTED;
1366                         goto out;
1367                 }
1368                 if ((first_block + i) & 1) {
1369                         /* this is block of buddy */
1370                         BUG_ON(incore == NULL);
1371                         mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1372                                 group, page->index, i * blocksize);
1373                         trace_ext4_mb_buddy_bitmap_load(sb, group);
1374                         grinfo->bb_fragments = 0;
1375                         memset(grinfo->bb_counters, 0,
1376                                sizeof(*grinfo->bb_counters) *
1377                                (MB_NUM_ORDERS(sb)));
1378                         /*
1379                          * incore was set to this group's block bitmap by the else branch below
1380                          */
1381                         ext4_lock_group(sb, group);
1382                         /* init the buddy */
1383                         memset(data, 0xff, blocksize);
1384                         ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1385                         ext4_unlock_group(sb, group);
1386                         incore = NULL;
1387                 } else {
1388                         /* this is block of bitmap */
1389                         BUG_ON(incore != NULL);
1390                         mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1391                                 group, page->index, i * blocksize);
1392                         trace_ext4_mb_bitmap_load(sb, group);
1393
1394                         /* see comments in ext4_mb_put_pa() */
1395                         ext4_lock_group(sb, group);
1396                         memcpy(data, bitmap, blocksize);
1397
1398                         /* mark all preallocated blks used in in-core bitmap */
1399                         ext4_mb_generate_from_pa(sb, data, group);
1400                         WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
1401                         ext4_unlock_group(sb, group);
1402
1403                         /* set incore so that the buddy information can be
1404                          * generated using this
1405                          */
1406                         incore = data;
1407                 }
1408         }
1409         SetPageUptodate(page);
1410
1411 out:
1412         if (bh) {
1413                 for (i = 0; i < groups_per_page; i++)
1414                         brelse(bh[i]);
1415                 if (bh != &bhs)
1416                         kfree(bh);
1417         }
1418         return err;
1419 }
1420
1421 /*
1422  * Lock the buddy and bitmap pages. This makes sure that a parallel
1423  * init_group on the same buddy page cannot happen while we hold the buddy
1424  * page lock. The locked pages are returned in the e4b struct. If buddy and
1425  * bitmap are on the same page, e4b->bd_buddy_page is NULL and 0 is returned.
1426  */
1427 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1428                 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1429 {
1430         struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1431         int block, pnum, poff;
1432         int blocks_per_page;
1433         struct page *page;
1434
1435         e4b->bd_buddy_page = NULL;
1436         e4b->bd_bitmap_page = NULL;
1437
1438         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1439         /*
1440          * the buddy cache inode stores the block bitmap
1441          * and buddy information in consecutive blocks.
1442          * So for each group we need two blocks.
1443          */
1444         block = group * 2;
1445         pnum = block / blocks_per_page;
1446         poff = block % blocks_per_page;
1447         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1448         if (!page)
1449                 return -ENOMEM;
1450         BUG_ON(page->mapping != inode->i_mapping);
1451         e4b->bd_bitmap_page = page;
1452         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1453
1454         if (blocks_per_page >= 2) {
1455                 /* buddy and bitmap are on the same page */
1456                 return 0;
1457         }
1458
1459         /* blocks_per_page == 1, hence we need another page for the buddy */
1460         page = find_or_create_page(inode->i_mapping, block + 1, gfp);
1461         if (!page)
1462                 return -ENOMEM;
1463         BUG_ON(page->mapping != inode->i_mapping);
1464         e4b->bd_buddy_page = page;
1465         return 0;
1466 }
1467
1468 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1469 {
1470         if (e4b->bd_bitmap_page) {
1471                 unlock_page(e4b->bd_bitmap_page);
1472                 put_page(e4b->bd_bitmap_page);
1473         }
1474         if (e4b->bd_buddy_page) {
1475                 unlock_page(e4b->bd_buddy_page);
1476                 put_page(e4b->bd_buddy_page);
1477         }
1478 }
1479
1480 /*
1481  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1482  * block group lock of all groups for this page; do not hold the BG lock when
1483  * calling this routine!
1484  */
1485 static noinline_for_stack
1486 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1487 {
1488
1489         struct ext4_group_info *this_grp;
1490         struct ext4_buddy e4b;
1491         struct page *page;
1492         int ret = 0;
1493
1494         might_sleep();
1495         mb_debug(sb, "init group %u\n", group);
1496         this_grp = ext4_get_group_info(sb, group);
1497         if (!this_grp)
1498                 return -EFSCORRUPTED;
1499
1500         /*
1501          * This ensures that we don't reinit the buddy cache
1502          * page which maps to the group from which we are already
1503          * allocating. If we are looking at the buddy cache we would
1504          * have taken a reference using ext4_mb_load_buddy and that
1505          * would have pinned the buddy page to the page cache.
1506          * The call to ext4_mb_get_buddy_page_lock will mark the
1507          * page accessed.
1508          */
1509         ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1510         if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1511                 /*
1512                  * somebody initialized the group
1513                  * return without doing anything
1514                  */
1515                 goto err;
1516         }
1517
1518         page = e4b.bd_bitmap_page;
1519         ret = ext4_mb_init_cache(page, NULL, gfp);
1520         if (ret)
1521                 goto err;
1522         if (!PageUptodate(page)) {
1523                 ret = -EIO;
1524                 goto err;
1525         }
1526
1527         if (e4b.bd_buddy_page == NULL) {
1528                 /*
1529                  * If both the bitmap and buddy are on
1530                  * the same page we don't need to force
1531                  * init the buddy
1532                  */
1533                 ret = 0;
1534                 goto err;
1535         }
1536         /* init buddy cache */
1537         page = e4b.bd_buddy_page;
1538         ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1539         if (ret)
1540                 goto err;
1541         if (!PageUptodate(page)) {
1542                 ret = -EIO;
1543                 goto err;
1544         }
1545 err:
1546         ext4_mb_put_buddy_page_lock(&e4b);
1547         return ret;
1548 }
1549
1550 /*
1551  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1552  * block group lock of all groups for this page; do not hold the BG lock when
1553  * calling this routine!
1554  */
1555 static noinline_for_stack int
1556 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1557                        struct ext4_buddy *e4b, gfp_t gfp)
1558 {
1559         int blocks_per_page;
1560         int block;
1561         int pnum;
1562         int poff;
1563         struct page *page;
1564         int ret;
1565         struct ext4_group_info *grp;
1566         struct ext4_sb_info *sbi = EXT4_SB(sb);
1567         struct inode *inode = sbi->s_buddy_cache;
1568
1569         might_sleep();
1570         mb_debug(sb, "load group %u\n", group);
1571
1572         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1573         grp = ext4_get_group_info(sb, group);
1574         if (!grp)
1575                 return -EFSCORRUPTED;
1576
1577         e4b->bd_blkbits = sb->s_blocksize_bits;
1578         e4b->bd_info = grp;
1579         e4b->bd_sb = sb;
1580         e4b->bd_group = group;
1581         e4b->bd_buddy_page = NULL;
1582         e4b->bd_bitmap_page = NULL;
1583
1584         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1585                 /*
1586                  * we need full data about the group
1587                  * to make a good selection
1588                  */
1589                 ret = ext4_mb_init_group(sb, group, gfp);
1590                 if (ret)
1591                         return ret;
1592         }
1593
1594         /*
1595          * the buddy cache inode stores the block bitmap
1596          * and buddy information in consecutive blocks.
1597          * So for each group we need two blocks.
1598          */
1599         block = group * 2;
1600         pnum = block / blocks_per_page;
1601         poff = block % blocks_per_page;
1602
1603         /* we could use find_or_create_page(), but it locks the page,
1604          * which we'd like to avoid in the fast path ... */
1605         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1606         if (page == NULL || !PageUptodate(page)) {
1607                 if (page)
1608                         /*
1609                          * drop the page reference and try
1610                          * to get the page with the lock. If we
1611                          * are not uptodate, that implies
1612                          * somebody just created the page but
1613                          * has not yet initialized it. So
1614                          * wait for it to be initialized.
1615                          */
1616                         put_page(page);
1617                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1618                 if (page) {
1619                         if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1620         "ext4: bitmap's page->mapping != inode->i_mapping\n")) {
1621                                 /* should never happen */
1622                                 unlock_page(page);
1623                                 ret = -EINVAL;
1624                                 goto err;
1625                         }
1626                         if (!PageUptodate(page)) {
1627                                 ret = ext4_mb_init_cache(page, NULL, gfp);
1628                                 if (ret) {
1629                                         unlock_page(page);
1630                                         goto err;
1631                                 }
1632                                 mb_cmp_bitmaps(e4b, page_address(page) +
1633                                                (poff * sb->s_blocksize));
1634                         }
1635                         unlock_page(page);
1636                 }
1637         }
1638         if (page == NULL) {
1639                 ret = -ENOMEM;
1640                 goto err;
1641         }
1642         if (!PageUptodate(page)) {
1643                 ret = -EIO;
1644                 goto err;
1645         }
1646
1647         /* Pages marked accessed already */
1648         e4b->bd_bitmap_page = page;
1649         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1650
1651         block++;
1652         pnum = block / blocks_per_page;
1653         poff = block % blocks_per_page;
1654
1655         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1656         if (page == NULL || !PageUptodate(page)) {
1657                 if (page)
1658                         put_page(page);
1659                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1660                 if (page) {
1661                         if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1662         "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
1663                                 /* should never happen */
1664                                 unlock_page(page);
1665                                 ret = -EINVAL;
1666                                 goto err;
1667                         }
1668                         if (!PageUptodate(page)) {
1669                                 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1670                                                          gfp);
1671                                 if (ret) {
1672                                         unlock_page(page);
1673                                         goto err;
1674                                 }
1675                         }
1676                         unlock_page(page);
1677                 }
1678         }
1679         if (page == NULL) {
1680                 ret = -ENOMEM;
1681                 goto err;
1682         }
1683         if (!PageUptodate(page)) {
1684                 ret = -EIO;
1685                 goto err;
1686         }
1687
1688         /* Pages marked accessed already */
1689         e4b->bd_buddy_page = page;
1690         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1691
1692         return 0;
1693
1694 err:
1695         if (page)
1696                 put_page(page);
1697         if (e4b->bd_bitmap_page)
1698                 put_page(e4b->bd_bitmap_page);
1699
1700         e4b->bd_buddy = NULL;
1701         e4b->bd_bitmap = NULL;
1702         return ret;
1703 }
1704
1705 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1706                               struct ext4_buddy *e4b)
1707 {
1708         return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1709 }
1710
1711 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1712 {
1713         if (e4b->bd_bitmap_page)
1714                 put_page(e4b->bd_bitmap_page);
1715         if (e4b->bd_buddy_page)
1716                 put_page(e4b->bd_buddy_page);
1717 }
1718
1719
1720 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1721 {
1722         int order = 1, max;
1723         void *bb;
1724
1725         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1726         BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1727
1728         while (order <= e4b->bd_blkbits + 1) {
1729                 bb = mb_find_buddy(e4b, order, &max);
1730                 if (!mb_test_bit(block >> order, bb)) {
1731                         /* this block is part of buddy of order 'order' */
1732                         return order;
1733                 }
1734                 order++;
1735         }
1736         return 0;
1737 }
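
/*
 * A standalone userspace sketch (not kernel code) of the order lookup
 * above, using one byte per bit instead of the kernel's packed buddy
 * bitmaps. The toy group has 8 blocks: blocks 0-3 form one free
 * order-2 buddy (its order-2 bit is clear while the covered order-1
 * bits stay set), and block 5 is free only as a single block. All
 * values are made up for illustration.
 */
#include <stdio.h>

#define TOY_MAX_ORDER 3

/* toy_buddy[order][idx] == 0: a free buddy of (1 << order) blocks at idx */
static const unsigned char toy_buddy[TOY_MAX_ORDER + 1][8] = {
        [1] = { 1, 1, 1, 1 },   /* no pair is free on its own */
        [2] = { 0, 1 },         /* blocks 0-3: one free order-2 buddy */
        [3] = { 1 },            /* the whole group is not free */
};

static int toy_find_order_for_block(int block)
{
        int order;

        for (order = 1; order <= TOY_MAX_ORDER; order++)
                if (!toy_buddy[order][block >> order])
                        return order;
        return 0;       /* free only at order 0 (order 0 lives in bd_bitmap) */
}

int main(void)
{
        printf("block 1 -> order %d\n", toy_find_order_for_block(1)); /* 2 */
        printf("block 5 -> order %d\n", toy_find_order_for_block(5)); /* 0 */
        return 0;
}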
1738
1739 static void mb_clear_bits(void *bm, int cur, int len)
1740 {
1741         __u32 *addr;
1742
1743         len = cur + len;
1744         while (cur < len) {
1745                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1746                         /* fast path: clear whole word at once */
1747                         addr = bm + (cur >> 3);
1748                         *addr = 0;
1749                         cur += 32;
1750                         continue;
1751                 }
1752                 mb_clear_bit(cur, bm);
1753                 cur++;
1754         }
1755 }
1756
1757 /* clear bits in the given range;
1758  * returns the first bit found to be already zero, if any, -1 otherwise
1759  */
1760 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1761 {
1762         __u32 *addr;
1763         int zero_bit = -1;
1764
1765         len = cur + len;
1766         while (cur < len) {
1767                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1768                         /* fast path: clear whole word at once */
1769                         addr = bm + (cur >> 3);
1770                         if (*addr != (__u32)(-1) && zero_bit == -1)
1771                                 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1772                         *addr = 0;
1773                         cur += 32;
1774                         continue;
1775                 }
1776                 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1777                         zero_bit = cur;
1778                 cur++;
1779         }
1780
1781         return zero_bit;
1782 }
1783
1784 void mb_set_bits(void *bm, int cur, int len)
1785 {
1786         __u32 *addr;
1787
1788         len = cur + len;
1789         while (cur < len) {
1790                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1791                         /* fast path: set whole word at once */
1792                         addr = bm + (cur >> 3);
1793                         *addr = 0xffffffff;
1794                         cur += 32;
1795                         continue;
1796                 }
1797                 mb_set_bit(cur, bm);
1798                 cur++;
1799         }
1800 }
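
/*
 * A standalone userspace sketch (not kernel code) of the word-at-a-time
 * fast path shared by mb_set_bits(), mb_clear_bits() and
 * mb_test_and_clear_bits() above: once the cursor is 32-bit aligned and
 * at least 32 bits remain, a whole 32-bit word is written in one store
 * instead of looping bit by bit. The helper below uses plain
 * little-endian byte/bit order and a local bitmap; it only illustrates
 * the technique, it is not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void toy_set_bits(void *bm, int cur, int len)
{
        uint8_t *bytes = bm;

        len = cur + len;
        while (cur < len) {
                if ((cur & 31) == 0 && (len - cur) >= 32) {
                        /* fast path: set a whole 32-bit word at once */
                        memset(bytes + (cur >> 3), 0xff, 4);
                        cur += 32;
                        continue;
                }
                bytes[cur >> 3] |= 1u << (cur & 7);     /* one bit at a time */
                cur++;
        }
}

int main(void)
{
        uint8_t bm[16] = { 0 };
        int i;

        toy_set_bits(bm, 5, 70);        /* unaligned head, 2 words, short tail */
        for (i = 0; i < 128; i++) {
                int want = (i >= 5 && i < 75);
                int got = (bm[i >> 3] >> (i & 7)) & 1;

                if (want != got) {
                        printf("mismatch at bit %d\n", i);
                        return 1;
                }
        }
        printf("bits [5, 75) set, everything else clear\n");
        return 0;
}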
1801
1802 static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
1803 {
1804         if (mb_test_bit(*bit + side, bitmap)) {
1805                 mb_clear_bit(*bit, bitmap);
1806                 (*bit) -= side;
1807                 return 1;
1808         } else {
1810                 (*bit) += side;
1811                 mb_set_bit(*bit, bitmap);
1812                 return -1;
1813         }
1814 }
1815
1816 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1817 {
1818         int max;
1819         int order = 1;
1820         void *buddy = mb_find_buddy(e4b, order, &max);
1821
1822         while (buddy) {
1823                 void *buddy2;
1824
1825                 /* Bits in range [first; last] are known to be set since
1826                  * corresponding blocks were allocated. Bits in range
1827                  * (first; last) will stay set because they form buddies on
1828                  * upper layer. We just deal with borders if they don't
1829                  * align with upper layer and then go up.
1830                  * Releasing the entire group comes down to clearing
1831                  * a single bit of the highest order buddy.
1832                  */
1833
1834                 /* Example:
1835                  * ---------------------------------
1836                  * |   1   |   1   |   1   |   1   |
1837                  * ---------------------------------
1838                  * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1839                  * ---------------------------------
1840                  *   0   1   2   3   4   5   6   7
1841                  *      \_____________________/
1842                  *
1843                  * Neither [1] nor [6] is aligned to the layer above.
1844                  * Left neighbour [0] is free, so mark it busy,
1845                  * decrease bb_counters and extend range to
1846                  * [0; 6]
1847                  * Right neighbour [7] is busy. It can't be coalesced with [6], so
1848                  * mark [6] free, increase bb_counters and shrink range to
1849                  * [0; 5].
1850                  * Then shift range to [0; 2], go up and do the same.
1851                  */
1852
1853
1854                 if (first & 1)
1855                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1856                 if (!(last & 1))
1857                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1858                 if (first > last)
1859                         break;
1860                 order++;
1861
1862                 buddy2 = mb_find_buddy(e4b, order, &max);
1863                 if (!buddy2) {
1864                         mb_clear_bits(buddy, first, last - first + 1);
1865                         e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1866                         break;
1867                 }
1868                 first >>= 1;
1869                 last >>= 1;
1870                 buddy = buddy2;
1871         }
1872 }
1873
1874 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1875                            int first, int count)
1876 {
1877         int left_is_free = 0;
1878         int right_is_free = 0;
1879         int block;
1880         int last = first + count - 1;
1881         struct super_block *sb = e4b->bd_sb;
1882
1883         if (WARN_ON(count == 0))
1884                 return;
1885         BUG_ON(last >= (sb->s_blocksize << 3));
1886         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1887         /* Don't bother if the block group is corrupt. */
1888         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1889                 return;
1890
1891         mb_check_buddy(e4b);
1892         mb_free_blocks_double(inode, e4b, first, count);
1893
1894         this_cpu_inc(discard_pa_seq);
1895         e4b->bd_info->bb_free += count;
1896         if (first < e4b->bd_info->bb_first_free)
1897                 e4b->bd_info->bb_first_free = first;
1898
1899         /* access memory sequentially: check left neighbour,
1900          * clear range and then check right neighbour
1901          */
1902         if (first != 0)
1903                 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1904         block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1905         if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1906                 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1907
1908         if (unlikely(block != -1)) {
1909                 struct ext4_sb_info *sbi = EXT4_SB(sb);
1910                 ext4_fsblk_t blocknr;
1911
1912                 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1913                 blocknr += EXT4_C2B(sbi, block);
1914                 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1915                         ext4_grp_locked_error(sb, e4b->bd_group,
1916                                               inode ? inode->i_ino : 0,
1917                                               blocknr,
1918                                               "freeing already freed block (bit %u); block bitmap corrupt.",
1919                                               block);
1920                         ext4_mark_group_bitmap_corrupted(
1921                                 sb, e4b->bd_group,
1922                                 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1923                 }
1924                 goto done;
1925         }
1926
1927         /* let's maintain fragments counter */
1928         if (left_is_free && right_is_free)
1929                 e4b->bd_info->bb_fragments--;
1930         else if (!left_is_free && !right_is_free)
1931                 e4b->bd_info->bb_fragments++;
1932
1933         /* buddy[0] == bd_bitmap is a special case, so handle
1934          * it right away and let mb_buddy_mark_free stay free of
1935          * zero order checks.
1936          * Check if neighbours are to be coalesced,
1937          * adjust bitmap bb_counters and borders appropriately.
1938          */
1939         if (first & 1) {
1940                 first += !left_is_free;
1941                 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1942         }
1943         if (!(last & 1)) {
1944                 last -= !right_is_free;
1945                 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1946         }
1947
1948         if (first <= last)
1949                 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1950
1951 done:
1952         mb_set_largest_free_order(sb, e4b->bd_info);
1953         mb_update_avg_fragment_size(sb, e4b->bd_info);
1954         mb_check_buddy(e4b);
1955 }
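
/*
 * A standalone userspace sketch (not kernel code) of the fragment
 * accounting at "let's maintain fragments counter" above: freeing a run
 * of blocks merges it with any free neighbour. With both neighbours
 * free, three free fragments become one (count drops by 1); with both
 * neighbours busy, a brand new fragment appears (count grows by 1);
 * with exactly one free neighbour the count is unchanged.
 */
#include <stdio.h>

static int toy_fragments_delta(int left_is_free, int right_is_free)
{
        if (left_is_free && right_is_free)
                return -1;      /* bridges two existing fragments */
        if (!left_is_free && !right_is_free)
                return 1;       /* isolated new fragment */
        return 0;               /* extends one existing fragment */
}

int main(void)
{
        printf("free | freed | free -> %+d\n", toy_fragments_delta(1, 1));
        printf("busy | freed | busy -> %+d\n", toy_fragments_delta(0, 0));
        printf("busy | freed | free -> %+d\n", toy_fragments_delta(0, 1));
        return 0;
}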
1956
1957 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1958                                 int needed, struct ext4_free_extent *ex)
1959 {
1960         int max, order, next;
1961         void *buddy;
1962
1963         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1964         BUG_ON(ex == NULL);
1965
1966         buddy = mb_find_buddy(e4b, 0, &max);
1967         BUG_ON(buddy == NULL);
1968         BUG_ON(block >= max);
1969         if (mb_test_bit(block, buddy)) {
1970                 ex->fe_len = 0;
1971                 ex->fe_start = 0;
1972                 ex->fe_group = 0;
1973                 return 0;
1974         }
1975
1976         /* find actual order */
1977         order = mb_find_order_for_block(e4b, block);
1978
1979         ex->fe_len = (1 << order) - (block & ((1 << order) - 1));
1980         ex->fe_start = block;
1981         ex->fe_group = e4b->bd_group;
1982
1983         block = block >> order;
1984
1985         while (needed > ex->fe_len &&
1986                mb_find_buddy(e4b, order, &max)) {
1987
1988                 if (block + 1 >= max)
1989                         break;
1990
1991                 next = (block + 1) * (1 << order);
1992                 if (mb_test_bit(next, e4b->bd_bitmap))
1993                         break;
1994
1995                 order = mb_find_order_for_block(e4b, next);
1996
1997                 block = next >> order;
1998                 ex->fe_len += 1 << order;
1999         }
2000
2001         if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2002                 /* Should never happen! (but apparently sometimes does?!?) */
2003                 WARN_ON(1);
2004                 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
2005                         "corruption or bug in mb_find_extent "
2006                         "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
2007                         block, order, needed, ex->fe_group, ex->fe_start,
2008                         ex->fe_len, ex->fe_logical);
2009                 ex->fe_len = 0;
2010                 ex->fe_start = 0;
2011                 ex->fe_group = 0;
2012         }
2013         return ex->fe_len;
2014 }
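
/*
 * A standalone userspace sketch (not kernel code) of the initial extent
 * length computed in mb_find_extent() above: a free buddy of order
 * 'order' covers (1 << order) blocks aligned to that order, so from an
 * arbitrary block inside it the run to the end of the chunk is
 * (1 << order) - (block & ((1 << order) - 1)). The sample block/order
 * pairs are made up.
 */
#include <stdio.h>

int main(void)
{
        static const int cases[][2] = {
                { 0, 3 }, { 5, 3 }, { 7, 3 }, { 12, 2 },
        };
        unsigned i;

        for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
                int block = cases[i][0], order = cases[i][1];
                int len = (1 << order) - (block & ((1 << order) - 1));

                printf("block %2d, order %d buddy: %d block(s) to chunk end\n",
                       block, order, len);
        }
        return 0;
}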
2015
2016 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
2017 {
2018         int ord;
2019         int mlen = 0;
2020         int max = 0;
2021         int cur;
2022         int start = ex->fe_start;
2023         int len = ex->fe_len;
2024         unsigned ret = 0;
2025         int len0 = len;
2026         void *buddy;
2027         bool split = false;
2028
2029         BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
2030         BUG_ON(e4b->bd_group != ex->fe_group);
2031         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
2032         mb_check_buddy(e4b);
2033         mb_mark_used_double(e4b, start, len);
2034
2035         this_cpu_inc(discard_pa_seq);
2036         e4b->bd_info->bb_free -= len;
2037         if (e4b->bd_info->bb_first_free == start)
2038                 e4b->bd_info->bb_first_free += len;
2039
2040         /* let's maintain fragments counter */
2041         if (start != 0)
2042                 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
2043         if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
2044                 max = !mb_test_bit(start + len, e4b->bd_bitmap);
2045         if (mlen && max)
2046                 e4b->bd_info->bb_fragments++;
2047         else if (!mlen && !max)
2048                 e4b->bd_info->bb_fragments--;
2049
2050         /* let's maintain buddy itself */
2051         while (len) {
2052                 if (!split)
2053                         ord = mb_find_order_for_block(e4b, start);
2054
2055                 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2056                         /* the whole chunk may be allocated at once! */
2057                         mlen = 1 << ord;
2058                         if (!split)
2059                                 buddy = mb_find_buddy(e4b, ord, &max);
2060                         else
2061                                 split = false;
2062                         BUG_ON((start >> ord) >= max);
2063                         mb_set_bit(start >> ord, buddy);
2064                         e4b->bd_info->bb_counters[ord]--;
2065                         start += mlen;
2066                         len -= mlen;
2067                         BUG_ON(len < 0);
2068                         continue;
2069                 }
2070
2071                 /* store for history */
2072                 if (ret == 0)
2073                         ret = len | (ord << 16);
2074
2075                 /* we have to split a large buddy */
2076                 BUG_ON(ord <= 0);
2077                 buddy = mb_find_buddy(e4b, ord, &max);
2078                 mb_set_bit(start >> ord, buddy);
2079                 e4b->bd_info->bb_counters[ord]--;
2080
2081                 ord--;
2082                 cur = (start >> ord) & ~1U;
2083                 buddy = mb_find_buddy(e4b, ord, &max);
2084                 mb_clear_bit(cur, buddy);
2085                 mb_clear_bit(cur + 1, buddy);
2086                 e4b->bd_info->bb_counters[ord]++;
2087                 e4b->bd_info->bb_counters[ord]++;
2088                 split = true;
2089         }
2090         mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2091
2092         mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2093         mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2094         mb_check_buddy(e4b);
2095
2096         return ret;
2097 }
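
/*
 * A standalone userspace sketch (not kernel code) of the return-value
 * encoding in mb_mark_used() above, decoded later in
 * ext4_mb_use_best_found(): the leftover length at the first buddy
 * split is packed into the low 16 bits and the order of that split into
 * the bits above. The sample values are made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned int len = 37, ord = 5;         /* assumed sample values */
        unsigned int ret = len | (ord << 16);   /* pack as in mb_mark_used() */

        /* unpack the same way ac_tail and ac_buddy are filled in */
        printf("ret = 0x%06x -> tail = %u, buddy order = %u\n",
               ret, ret & 0xffff, ret >> 16);
        return 0;
}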
2098
2099 /*
2100  * Must be called under group lock!
2101  */
2102 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2103                                         struct ext4_buddy *e4b)
2104 {
2105         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2106         int ret;
2107
2108         BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2109         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2110
2111         ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2112         ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2113         ret = mb_mark_used(e4b, &ac->ac_b_ex);
2114
2115         /* preallocation can change ac_b_ex, thus we store actually
2116          * allocated blocks for history */
2117         ac->ac_f_ex = ac->ac_b_ex;
2118
2119         ac->ac_status = AC_STATUS_FOUND;
2120         ac->ac_tail = ret & 0xffff;
2121         ac->ac_buddy = ret >> 16;
2122
2123         /*
2124          * take the page reference. We want the page to be pinned
2125          * so that we don't get an ext4_mb_init_cache() call for this
2126          * group until we update the bitmap. Otherwise we could
2127          * double-allocate blocks. The reference is dropped
2128          * in ext4_mb_release_context
2129          */
2130         ac->ac_bitmap_page = e4b->bd_bitmap_page;
2131         get_page(ac->ac_bitmap_page);
2132         ac->ac_buddy_page = e4b->bd_buddy_page;
2133         get_page(ac->ac_buddy_page);
2134         /* store last allocated for subsequent stream allocation */
2135         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2136                 spin_lock(&sbi->s_md_lock);
2137                 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2138                 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2139                 spin_unlock(&sbi->s_md_lock);
2140         }
2141         /*
2142          * As we've just preallocated more space than the
2143          * user originally requested, we store the allocated
2144          * space in a special descriptor.
2145          */
2146         if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2147                 ext4_mb_new_preallocation(ac);
2148
2149 }
2150
2151 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2152                                         struct ext4_buddy *e4b,
2153                                         int finish_group)
2154 {
2155         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2156         struct ext4_free_extent *bex = &ac->ac_b_ex;
2157         struct ext4_free_extent *gex = &ac->ac_g_ex;
2158
2159         if (ac->ac_status == AC_STATUS_FOUND)
2160                 return;
2161         /*
2162          * We don't want to scan for a whole year
2163          */
2164         if (ac->ac_found > sbi->s_mb_max_to_scan &&
2165                         !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2166                 ac->ac_status = AC_STATUS_BREAK;
2167                 return;
2168         }
2169
2170         /*
2171          * Haven't found a good chunk so far, let's continue
2172          */
2173         if (bex->fe_len < gex->fe_len)
2174                 return;
2175
2176         if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2177                 ext4_mb_use_best_found(ac, e4b);
2178 }
2179
2180 /*
2181  * The routine checks whether the found extent is good enough. If it is,
2182  * then the extent gets marked used and a flag is set in the context
2183  * to stop scanning. Otherwise, the extent is compared with the
2184  * previously found extent and, if the new one is better, it's stored
2185  * in the context. Later, the best found extent will be used, if
2186  * mballoc can't find a good enough extent.
2187  *
2188  * The algorithm used is roughly as follows:
2189  *
2190  * * If the free extent found is exactly as big as the goal, then
2191  *   stop the scan and use it immediately
2192  *
2193  * * If the free extent found is smaller than the goal, then keep
2194  *   retrying up to a max of sbi->s_mb_max_to_scan times (default 200).
2195  *   After that stop scanning and use whatever we have.
2196  *
2197  * * If the free extent found is bigger than the goal, then keep
2198  *   retrying up to a max of sbi->s_mb_min_to_scan times (default 10)
2199  *   before stopping the scan and using the extent.
2200  *
2201  *
2202  * FIXME: real allocation policy is to be designed yet!
2203  */
2204 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2205                                         struct ext4_free_extent *ex,
2206                                         struct ext4_buddy *e4b)
2207 {
2208         struct ext4_free_extent *bex = &ac->ac_b_ex;
2209         struct ext4_free_extent *gex = &ac->ac_g_ex;
2210
2211         BUG_ON(ex->fe_len <= 0);
2212         BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2213         BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2214         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2215
2216         ac->ac_found++;
2217         ac->ac_cX_found[ac->ac_criteria]++;
2218
2219         /*
2220          * The special case - take what you catch first
2221          */
2222         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2223                 *bex = *ex;
2224                 ext4_mb_use_best_found(ac, e4b);
2225                 return;
2226         }
2227
2228         /*
2229          * Let's check whether the chunk is good enough
2230          */
2231         if (ex->fe_len == gex->fe_len) {
2232                 *bex = *ex;
2233                 ext4_mb_use_best_found(ac, e4b);
2234                 return;
2235         }
2236
2237         /*
2238          * If this is the first found extent, just store it in the context
2239          */
2240         if (bex->fe_len == 0) {
2241                 *bex = *ex;
2242                 return;
2243         }
2244
2245         /*
2246          * If the newly found extent is better, store it in the context
2247          */
2248         if (bex->fe_len < gex->fe_len) {
2249                 /* if the request isn't satisfied, any found extent
2250                  * larger than the previous best one is better */
2251                 if (ex->fe_len > bex->fe_len)
2252                         *bex = *ex;
2253         } else if (ex->fe_len > gex->fe_len) {
2254                 /* if the request is satisfied, then we try to find
2255                  * an extent that still satisfies the request, but is
2256                  * smaller than the previous one */
2257                 if (ex->fe_len < bex->fe_len)
2258                         *bex = *ex;
2259         }
2260
2261         ext4_mb_check_limits(ac, e4b, 0);
2262 }
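
/*
 * A standalone userspace sketch (not kernel code) of the preference
 * rule implemented by ext4_mb_measure_extent() above (exact goal-sized
 * hits are taken immediately and are not modelled here): while the best
 * extent so far is shorter than the goal, any longer candidate wins;
 * once the goal is met, the smallest candidate still meeting the goal
 * wins. The goal and candidate lengths are made up.
 */
#include <stdio.h>

/* return 1 if a candidate of length ex should replace current best bex */
static int toy_better(int ex, int bex, int goal)
{
        if (bex < goal)         /* goal unmet: prefer the longer extent */
                return ex > bex;
        if (ex > goal)          /* goal met: prefer the tightest fit */
                return ex < bex;
        return 0;
}

int main(void)
{
        static const int found[] = { 3, 6, 20, 12, 9 };
        const int goal = 8;
        int best = 0;   /* fe_len == 0: nothing stored yet */
        unsigned i;

        for (i = 0; i < sizeof(found) / sizeof(found[0]); i++) {
                if (best == 0 || toy_better(found[i], best, goal))
                        best = found[i];
                printf("found %2d -> best so far %2d\n", found[i], best);
        }
        return 0;
}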
2263
2264 static noinline_for_stack
2265 void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2266                                         struct ext4_buddy *e4b)
2267 {
2268         struct ext4_free_extent ex = ac->ac_b_ex;
2269         ext4_group_t group = ex.fe_group;
2270         int max;
2271         int err;
2272
2273         BUG_ON(ex.fe_len <= 0);
2274         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2275         if (err)
2276                 return;
2277
2278         ext4_lock_group(ac->ac_sb, group);
2279         max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2280
2281         if (max > 0) {
2282                 ac->ac_b_ex = ex;
2283                 ext4_mb_use_best_found(ac, e4b);
2284         }
2285
2286         ext4_unlock_group(ac->ac_sb, group);
2287         ext4_mb_unload_buddy(e4b);
2288 }
2289
2290 static noinline_for_stack
2291 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2292                                 struct ext4_buddy *e4b)
2293 {
2294         ext4_group_t group = ac->ac_g_ex.fe_group;
2295         int max;
2296         int err;
2297         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2298         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2299         struct ext4_free_extent ex;
2300
2301         if (!grp)
2302                 return -EFSCORRUPTED;
2303         if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2304                 return 0;
2305         if (grp->bb_free == 0)
2306                 return 0;
2307
2308         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2309         if (err)
2310                 return err;
2311
2312         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2313                 ext4_mb_unload_buddy(e4b);
2314                 return 0;
2315         }
2316
2317         ext4_lock_group(ac->ac_sb, group);
2318         max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2319                              ac->ac_g_ex.fe_len, &ex);
2320         ex.fe_logical = 0xDEADFA11; /* debug value */
2321
2322         if (max >= ac->ac_g_ex.fe_len &&
2323             ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
2324                 ext4_fsblk_t start;
2325
2326                 start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2327                 /* use do_div to get remainder (would be 64-bit modulo) */
2328                 if (do_div(start, sbi->s_stripe) == 0) {
2329                         ac->ac_found++;
2330                         ac->ac_b_ex = ex;
2331                         ext4_mb_use_best_found(ac, e4b);
2332                 }
2333         } else if (max >= ac->ac_g_ex.fe_len) {
2334                 BUG_ON(ex.fe_len <= 0);
2335                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2336                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2337                 ac->ac_found++;
2338                 ac->ac_b_ex = ex;
2339                 ext4_mb_use_best_found(ac, e4b);
2340         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2341                 /* Sometimes, the caller may want to merge even a small
2342                  * number of blocks into an existing extent */
2343                 BUG_ON(ex.fe_len <= 0);
2344                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2345                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2346                 ac->ac_found++;
2347                 ac->ac_b_ex = ex;
2348                 ext4_mb_use_best_found(ac, e4b);
2349         }
2350         ext4_unlock_group(ac->ac_sb, group);
2351         ext4_mb_unload_buddy(e4b);
2352
2353         return 0;
2354 }
2355
2356 /*
2357  * The routine scans buddy structures (not the bitmap!) from the given order
2358  * up to the max order, trying to find a big enough chunk to satisfy the request
2359  */
2360 static noinline_for_stack
2361 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2362                                         struct ext4_buddy *e4b)
2363 {
2364         struct super_block *sb = ac->ac_sb;
2365         struct ext4_group_info *grp = e4b->bd_info;
2366         void *buddy;
2367         int i;
2368         int k;
2369         int max;
2370
2371         BUG_ON(ac->ac_2order <= 0);
2372         for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2373                 if (grp->bb_counters[i] == 0)
2374                         continue;
2375
2376                 buddy = mb_find_buddy(e4b, i, &max);
2377                 if (WARN_RATELIMIT(buddy == NULL,
2378                          "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
2379                         continue;
2380
2381                 k = mb_find_next_zero_bit(buddy, max, 0);
2382                 if (k >= max) {
2383                         ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2384                                 "%d free clusters of order %d. But found 0",
2385                                 grp->bb_counters[i], i);
2386                         ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2387                                          e4b->bd_group,
2388                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2389                         break;
2390                 }
2391                 ac->ac_found++;
2392                 ac->ac_cX_found[ac->ac_criteria]++;
2393
2394                 ac->ac_b_ex.fe_len = 1 << i;
2395                 ac->ac_b_ex.fe_start = k << i;
2396                 ac->ac_b_ex.fe_group = e4b->bd_group;
2397
2398                 ext4_mb_use_best_found(ac, e4b);
2399
2400                 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2401
2402                 if (EXT4_SB(sb)->s_mb_stats)
2403                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2404
2405                 break;
2406         }
2407 }
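
/*
 * A standalone userspace sketch (not kernel code) of the conversion at
 * the end of ext4_mb_simple_scan_group() above: the k-th zero bit in
 * the order-i buddy bitmap describes a free chunk of (1 << i) clusters
 * starting at cluster (k << i) within the group. The order and bit
 * index below are made up.
 */
#include <stdio.h>

int main(void)
{
        int i = 4, k = 3;       /* assumed: order-4 buddy bitmap, bit 3 */

        printf("order %d, bit %d -> start %d, len %d\n",
               i, k, k << i, 1 << i);
        return 0;
}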
2408
2409 /*
2410  * The routine scans the group and measures all found extents.
2411  * In order to optimize scanning, the caller must pass the number of
2412  * free blocks in the group, so the routine can know the upper limit.
2413  */
2414 static noinline_for_stack
2415 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2416                                         struct ext4_buddy *e4b)
2417 {
2418         struct super_block *sb = ac->ac_sb;
2419         void *bitmap = e4b->bd_bitmap;
2420         struct ext4_free_extent ex;
2421         int i, j, freelen;
2422         int free;
2423
2424         free = e4b->bd_info->bb_free;
2425         if (WARN_ON(free <= 0))
2426                 return;
2427
2428         i = e4b->bd_info->bb_first_free;
2429
2430         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2431                 i = mb_find_next_zero_bit(bitmap,
2432                                                 EXT4_CLUSTERS_PER_GROUP(sb), i);
2433                 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2434                         /*
2435                          * If we have a corrupt bitmap, we won't find any
2436                          * free blocks even though the group info says we
2437                          * have free blocks
2438                          */
2439                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2440                                         "%d free clusters as per "
2441                                         "group info. But bitmap says 0",
2442                                         free);
2443                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2444                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2445                         break;
2446                 }
2447
2448                 if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
2449                         /*
2450                          * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
2451                          * sure that this group will have a large enough
2452                          * contiguous free extent, so skip over the smaller free
2453                          * extents
2454                          */
2455                         j = mb_find_next_bit(bitmap,
2456                                                 EXT4_CLUSTERS_PER_GROUP(sb), i);
2457                         freelen = j - i;
2458
2459                         if (freelen < ac->ac_g_ex.fe_len) {
2460                                 i = j;
2461                                 free -= freelen;
2462                                 continue;
2463                         }
2464                 }
2465
2466                 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2467                 if (WARN_ON(ex.fe_len <= 0))
2468                         break;
2469                 if (free < ex.fe_len) {
2470                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2471                                         "%d free clusters as per "
2472                                         "group info. But got %d blocks",
2473                                         free, ex.fe_len);
2474                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2475                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2476                         /*
2477                          * The number of free blocks differs. This mostly
2478                          * indicates that the bitmap is corrupt. So exit
2479                          * without claiming the space.
2480                          */
2481                         break;
2482                 }
2483                 ex.fe_logical = 0xDEADC0DE; /* debug value */
2484                 ext4_mb_measure_extent(ac, &ex, e4b);
2485
2486                 i += ex.fe_len;
2487                 free -= ex.fe_len;
2488         }
2489
2490         ext4_mb_check_limits(ac, e4b, 1);
2491 }
2492
2493 /*
2494  * This is a special case for storage like raid5:
2495  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2496  */
2497 static noinline_for_stack
2498 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2499                                  struct ext4_buddy *e4b)
2500 {
2501         struct super_block *sb = ac->ac_sb;
2502         struct ext4_sb_info *sbi = EXT4_SB(sb);
2503         void *bitmap = e4b->bd_bitmap;
2504         struct ext4_free_extent ex;
2505         ext4_fsblk_t first_group_block;
2506         ext4_fsblk_t a;
2507         ext4_grpblk_t i, stripe;
2508         int max;
2509
2510         BUG_ON(sbi->s_stripe == 0);
2511
2512         /* find first stripe-aligned block in group */
2513         first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2514
2515         a = first_group_block + sbi->s_stripe - 1;
2516         do_div(a, sbi->s_stripe);
2517         i = (a * sbi->s_stripe) - first_group_block;
2518
2519         stripe = EXT4_B2C(sbi, sbi->s_stripe);
2520         i = EXT4_B2C(sbi, i);
2521         while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2522                 if (!mb_test_bit(i, bitmap)) {
2523                         max = mb_find_extent(e4b, i, stripe, &ex);
2524                         if (max >= stripe) {
2525                                 ac->ac_found++;
2526                                 ac->ac_cX_found[ac->ac_criteria]++;
2527                                 ex.fe_logical = 0xDEADF00D; /* debug value */
2528                                 ac->ac_b_ex = ex;
2529                                 ext4_mb_use_best_found(ac, e4b);
2530                                 break;
2531                         }
2532                 }
2533                 i += stripe;
2534         }
2535 }
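
/*
 * A standalone userspace sketch (not kernel code) of the alignment step
 * at the top of ext4_mb_scan_aligned() above: round the group's first
 * block up to the next multiple of the stripe size, then turn that back
 * into an offset inside the group. The block number and stripe size are
 * made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long first_group_block = 32770;   /* assumed */
        unsigned long long stripe = 16;                 /* assumed */
        unsigned long long a = (first_group_block + stripe - 1) / stripe;
        unsigned long long i = a * stripe - first_group_block;

        printf("first stripe-aligned offset: %llu (block %llu)\n",
               i, first_group_block + i);
        return 0;
}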
2536
2537 /*
2538  * This is also called BEFORE we load the buddy bitmap.
2539  * Returns true or false to indicate whether the group is suitable
2540  * for the allocation or not.
2541  */
2542 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2543                                 ext4_group_t group, enum criteria cr)
2544 {
2545         ext4_grpblk_t free, fragments;
2546         int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2547         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2548
2549         BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
2550
2551         if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2552                 return false;
2553
2554         free = grp->bb_free;
2555         if (free == 0)
2556                 return false;
2557
2558         fragments = grp->bb_fragments;
2559         if (fragments == 0)
2560                 return false;
2561
2562         switch (cr) {
2563         case CR_POWER2_ALIGNED:
2564                 BUG_ON(ac->ac_2order == 0);
2565
2566                 /* Avoid using the first bg of a flexgroup for data files */
2567                 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2568                     (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2569                     ((group % flex_size) == 0))
2570                         return false;
2571
2572                 if (free < ac->ac_g_ex.fe_len)
2573                         return false;
2574
2575                 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2576                         return true;
2577
2578                 if (grp->bb_largest_free_order < ac->ac_2order)
2579                         return false;
2580
2581                 return true;
2582         case CR_GOAL_LEN_FAST:
2583         case CR_BEST_AVAIL_LEN:
2584                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2585                         return true;
2586                 break;
2587         case CR_GOAL_LEN_SLOW:
2588                 if (free >= ac->ac_g_ex.fe_len)
2589                         return true;
2590                 break;
2591         case CR_ANY_FREE:
2592                 return true;
2593         default:
2594                 BUG();
2595         }
2596
2597         return false;
2598 }
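
/*
 * A standalone userspace sketch (not kernel code) of the
 * CR_GOAL_LEN_FAST / CR_BEST_AVAIL_LEN heuristic in ext4_mb_good_group()
 * above: a group looks promising when its average free fragment (free
 * clusters divided by fragment count) is at least the goal length. The
 * group statistics are made up.
 */
#include <stdio.h>

int main(void)
{
        static const struct { int free, fragments; } grp[] = {
                { 512, 2 }, { 512, 128 }, { 64, 1 },
        };
        const int goal = 32;
        unsigned i;

        for (i = 0; i < sizeof(grp) / sizeof(grp[0]); i++)
                printf("group %u: %d free / %d fragments = %d -> %s\n",
                       i, grp[i].free, grp[i].fragments,
                       grp[i].free / grp[i].fragments,
                       grp[i].free / grp[i].fragments >= goal ? "scan" : "skip");
        return 0;
}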
2599
2600 /*
2601  * This could return a negative error code if something goes wrong
2602  * during ext4_mb_init_group(). This should not be called with
2603  * ext4_lock_group() held.
2604  *
2605  * Note: because we are conditionally operating with the group lock in
2606  * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2607  * function using __acquire and __release.  This means we need to be
2608  * super careful before messing with the error path handling via "goto
2609  * out"!
2610  */
2611 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2612                                      ext4_group_t group, enum criteria cr)
2613 {
2614         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2615         struct super_block *sb = ac->ac_sb;
2616         struct ext4_sb_info *sbi = EXT4_SB(sb);
2617         bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2618         ext4_grpblk_t free;
2619         int ret = 0;
2620
2621         if (!grp)
2622                 return -EFSCORRUPTED;
2623         if (sbi->s_mb_stats)
2624                 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2625         if (should_lock) {
2626                 ext4_lock_group(sb, group);
2627                 __release(ext4_group_lock_ptr(sb, group));
2628         }
2629         free = grp->bb_free;
2630         if (free == 0)
2631                 goto out;
2632         /*
2633          * In all criteria except CR_ANY_FREE we try to avoid groups that
2634          * can't possibly satisfy the full goal request due to insufficient
2635          * free blocks.
2636          */
2637         if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
2638                 goto out;
2639         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2640                 goto out;
2641         if (should_lock) {
2642                 __acquire(ext4_group_lock_ptr(sb, group));
2643                 ext4_unlock_group(sb, group);
2644         }
2645
2646         /* We only do this if the grp has never been initialized */
2647         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2648                 struct ext4_group_desc *gdp =
2649                         ext4_get_group_desc(sb, group, NULL);
2650                 int ret;
2651
2652                 /*
2653                  * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
2654                  * search to find large good chunks almost for free. If buddy
2655                  * data is not ready, then this optimization makes no sense. But
2656                  * we never skip the first block group in a flex_bg, since this
2657                  * gets used for metadata block allocation, and we want to make
2658                  * sure we locate metadata blocks in the first block group in
2659                  * the flex_bg if possible.
2660                  */
2661                 if (!ext4_mb_cr_expensive(cr) &&
2662                     (!sbi->s_log_groups_per_flex ||
2663                      ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2664                     !(ext4_has_group_desc_csum(sb) &&
2665                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2666                         return 0;
2667                 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2668                 if (ret)
2669                         return ret;
2670         }
2671
2672         if (should_lock) {
2673                 ext4_lock_group(sb, group);
2674                 __release(ext4_group_lock_ptr(sb, group));
2675         }
2676         ret = ext4_mb_good_group(ac, group, cr);
2677 out:
2678         if (should_lock) {
2679                 __acquire(ext4_group_lock_ptr(sb, group));
2680                 ext4_unlock_group(sb, group);
2681         }
2682         return ret;
2683 }
2684
2685 /*
2686  * Start prefetching @nr block bitmaps starting at @group.
2687  * Return the next group which needs to be prefetched.
2688  */
2689 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2690                               unsigned int nr, int *cnt)
2691 {
2692         ext4_group_t ngroups = ext4_get_groups_count(sb);
2693         struct buffer_head *bh;
2694         struct blk_plug plug;
2695
2696         blk_start_plug(&plug);
2697         while (nr-- > 0) {
2698                 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2699                                                                   NULL);
2700                 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2701
2702                 /*
2703                  * Prefetch block groups with free blocks; but don't
2704                  * bother if it is marked uninitialized on disk, since
2705                  * it won't require I/O to read.  Also only try to
2706                  * prefetch once, so we avoid the getblk() call, which can
2707                  * be expensive.
2708                  */
2709                 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2710                     EXT4_MB_GRP_NEED_INIT(grp) &&
2711                     ext4_free_group_clusters(sb, gdp) > 0) {
2712                         bh = ext4_read_block_bitmap_nowait(sb, group, true);
2713                         if (bh && !IS_ERR(bh)) {
2714                                 if (!buffer_uptodate(bh) && cnt)
2715                                         (*cnt)++;
2716                                 brelse(bh);
2717                         }
2718                 }
2719                 if (++group >= ngroups)
2720                         group = 0;
2721         }
2722         blk_finish_plug(&plug);
2723         return group;
2724 }
2725
2726 /*
2727  * Prefetching reads the block bitmap into the buffer cache; but we
2728  * need to make sure that the buddy bitmap in the page cache has been
2729  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2730  * is not yet completed, or indeed if the I/O was never initiated
2731  * because ext4_mb_prefetch did not start it.
2732  *
2733  * TODO: We should actually kick off the buddy bitmap setup in a work
2734  * queue when the buffer I/O is completed, so that we don't block
2735  * waiting for the block allocation bitmap read to finish when
2736  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2737  */
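/*
 * Calling convention note: @group is the group *after* the last one
 * submitted by ext4_mb_prefetch() -- i.e. its return value -- so the
 * loop below walks backwards over the @nr prefetched groups, wrapping
 * around at group 0.
 */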
2738 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2739                            unsigned int nr)
2740 {
2741         struct ext4_group_desc *gdp;
2742         struct ext4_group_info *grp;
2743
2744         while (nr-- > 0) {
2745                 if (!group)
2746                         group = ext4_get_groups_count(sb);
2747                 group--;
2748                 gdp = ext4_get_group_desc(sb, group, NULL);
2749                 grp = ext4_get_group_info(sb, group);
2750
2751                 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2752                     ext4_free_group_clusters(sb, gdp) > 0) {
2753                         if (ext4_mb_init_group(sb, group, GFP_NOFS))
2754                                 break;
2755                 }
2756         }
2757 }
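/*
 * Typical pairing, as an illustrative sketch of the call sites (cf.
 * ext4_mb_regular_allocator() below), not a verbatim quote:
 *
 *	group = ext4_mb_prefetch(sb, group, nr, &ios);	- submit reads
 *	...
 *	ext4_mb_prefetch_fini(sb, group, nr);		- init buddy data
 */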
2758
2759 static noinline_for_stack int
2760 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2761 {
2762         ext4_group_t prefetch_grp = 0, ngroups, group, i;
2763         enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2764         int err = 0, first_err = 0;
2765         unsigned int nr = 0, prefetch_ios = 0;
2766         struct ext4_sb_info *sbi;
2767         struct super_block *sb;
2768         struct ext4_buddy e4b;
2769         int lost;
2770
2771         sb = ac->ac_sb;
2772         sbi = EXT4_SB(sb);
2773         ngroups = ext4_get_groups_count(sb);
2774         /* non-extent files are limited to low blocks/groups */
2775         if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2776                 ngroups = sbi->s_blockfile_groups;
2777
2778         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2779
2780         /* first, try the goal */
2781         err = ext4_mb_find_by_goal(ac, &e4b);
2782         if (err || ac->ac_status == AC_STATUS_FOUND)
2783                 goto out;
2784
2785         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2786                 goto out;
2787
2788         /*
2789          * ac->ac_2order is set only if the fe_len is a power of 2;
2790          * if ac->ac_2order is set we also set the criteria to CR_POWER2_ALIGNED
2791          * so that we try exact allocation using the buddy data.
2792          */
2793         i = fls(ac->ac_g_ex.fe_len);
2794         ac->ac_2order = 0;
2795         /*
2796          * We search using buddy data only if the order of the request
2797          * is greater than or equal to sbi->s_mb_order2_reqs.
2798          * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2799          * We also support searching for power-of-two requests only up
2800          * to the maximum buddy size we have constructed.
2801          */
2802         if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2803                 if (is_power_of_2(ac->ac_g_ex.fe_len))
2804                         ac->ac_2order = array_index_nospec(i - 1,
2805                                                            MB_NUM_ORDERS(sb));
2806         }
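        /*
         * Illustrative example (not from the source): with the default
         * mb_order2_reqs of 2, a goal of fe_len = 16 clusters gives
         * fls(16) = 5, and since 16 is a power of two we get
         * ac_2order = 4, so the scan below starts at CR_POWER2_ALIGNED.
         */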
2807
2808         /* if stream allocation is enabled, use global goal */
2809         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2810                 /* TBD: this lock may become a hot spot */
2811                 spin_lock(&sbi->s_md_lock);
2812                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2813                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2814                 spin_unlock(&sbi->s_md_lock);
2815         }
2816
2817         /*
2818          * Let's just scan groups to find more or less suitable blocks.
2819          * We start with CR_GOAL_LEN_FAST, unless the request is power-of-2
2820          * aligned, in which case let's try that faster approach first.
2821          */
2822         if (ac->ac_2order)
2823                 cr = CR_POWER2_ALIGNED;
2824 repeat:
2825         for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2826                 ac->ac_criteria = cr;
2827                 /*
2828                  * start searching for the right group
2829                  * from the goal value specified
2830                  */
2831                 group = ac->ac_g_ex.fe_group;
2832                 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2833                 prefetch_grp = group;
2834
2835                 for (i = 0, new_cr = cr; i < ngroups; i++,
2836                      ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2837                         int ret = 0;
2838
2839                         cond_resched();
2840                         if (new_cr != cr) {
2841                                 cr = new_cr;
2842                                 goto repeat;
2843                         }
2844
2845                         /*
2846                          * Batch reads of the block allocation bitmaps
2847                          * to get multiple READs in flight; limit
2848                          * prefetching at the inexpensive CRs, otherwise mballoc
2849                          * can spend a lot of time loading imperfect groups.
2850                          */
2851                         if ((prefetch_grp == group) &&
2852                             (ext4_mb_cr_expensive(cr) ||
2853                              prefetch_ios < sbi->s_mb_prefetch_limit)) {
2854                                 nr = sbi->s_mb_prefetch;
2855                                 if (ext4_has_feature_flex_bg(sb)) {
2856                                         nr = 1 << sbi->s_log_groups_per_flex;
2857                                         nr -= group & (nr - 1);
2858                                         nr = min(nr, sbi->s_mb_prefetch);
2859                                 }
2860                                 prefetch_grp = ext4_mb_prefetch(sb, group,
2861                                                         nr, &prefetch_ios);
2862                         }
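                        /*
                         * Illustrative numbers: with s_log_groups_per_flex = 4
                         * a flex group spans 16 groups; starting at group 35,
                         * 35 & 15 = 3, so nr = 16 - 3 = 13 reads up to the
                         * flex group boundary, capped by s_mb_prefetch.
                         */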
2863
2864                         /* This now checks without needing the buddy page */
2865                         ret = ext4_mb_good_group_nolock(ac, group, cr);
2866                         if (ret <= 0) {
2867                                 if (!first_err)
2868                                         first_err = ret;
2869                                 continue;
2870                         }
2871
2872                         err = ext4_mb_load_buddy(sb, group, &e4b);
2873                         if (err)
2874                                 goto out;
2875
2876                         ext4_lock_group(sb, group);
2877
2878                         /*
2879                          * We need to check again after locking the
2880                          * block group
2881                          */
2882                         ret = ext4_mb_good_group(ac, group, cr);
2883                         if (ret == 0) {
2884                                 ext4_unlock_group(sb, group);
2885                                 ext4_mb_unload_buddy(&e4b);
2886                                 continue;
2887                         }
2888
2889                         ac->ac_groups_scanned++;
2890                         if (cr == CR_POWER2_ALIGNED)
2891                                 ext4_mb_simple_scan_group(ac, &e4b);
2892                         else {
2893                                 bool is_stripe_aligned = sbi->s_stripe &&
2894                                         !(ac->ac_g_ex.fe_len %
2895                                           EXT4_B2C(sbi, sbi->s_stripe));
2896
2897                                 if ((cr == CR_GOAL_LEN_FAST ||
2898                                      cr == CR_BEST_AVAIL_LEN) &&
2899                                     is_stripe_aligned)
2900                                         ext4_mb_scan_aligned(ac, &e4b);
2901
2902                                 if (ac->ac_status == AC_STATUS_CONTINUE)
2903                                         ext4_mb_complex_scan_group(ac, &e4b);
2904                         }
2905
2906                         ext4_unlock_group(sb, group);
2907                         ext4_mb_unload_buddy(&e4b);
2908
2909                         if (ac->ac_status != AC_STATUS_CONTINUE)
2910                                 break;
2911                 }
2912                 /* Processed all groups but haven't found blocks */
2913                 if (sbi->s_mb_stats && i == ngroups)
2914                         atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2915
2916                 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
2917                         /* Reset goal length to original goal length before
2918                          * falling into CR_GOAL_LEN_SLOW */
2919                         ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
2920         }
2921
2922         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2923             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2924                 /*
2925                  * We've been searching too long. Let's try to allocate
2926                  * the best chunk we've found so far
2927                  */
2928                 ext4_mb_try_best_found(ac, &e4b);
2929                 if (ac->ac_status != AC_STATUS_FOUND) {
2930                         /*
2931                          * Someone luckier has already allocated it.
2932                          * The only thing we can do is just take the
2933                          * first found block(s).
2934                          */
2935                         lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2936                         mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2937                                  ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2938                                  ac->ac_b_ex.fe_len, lost);
2939
2940                         ac->ac_b_ex.fe_group = 0;
2941                         ac->ac_b_ex.fe_start = 0;
2942                         ac->ac_b_ex.fe_len = 0;
2943                         ac->ac_status = AC_STATUS_CONTINUE;
2944                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
2945                         cr = CR_ANY_FREE;
2946                         goto repeat;
2947                 }
2948         }
2949
2950         if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2951                 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2952 out:
2953         if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2954                 err = first_err;
2955
2956         mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2957                  ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2958                  ac->ac_flags, cr, err);
2959
2960         if (nr)
2961                 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2962
2963         return err;
2964 }
2965
2966 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2967 {
2968         struct super_block *sb = pde_data(file_inode(seq->file));
2969         ext4_group_t group;
2970
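        /*
         * Offset the group number by one so that group 0 does not become
         * a NULL iterator cookie (seq_file treats NULL as end of
         * iteration); ext4_mb_seq_groups_show() undoes the offset.
         */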
2971         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2972                 return NULL;
2973         group = *pos + 1;
2974         return (void *) ((unsigned long) group);
2975 }
2976
2977 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2978 {
2979         struct super_block *sb = pde_data(file_inode(seq->file));
2980         ext4_group_t group;
2981
2982         ++*pos;
2983         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2984                 return NULL;
2985         group = *pos + 1;
2986         return (void *) ((unsigned long) group);
2987 }
2988
2989 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2990 {
2991         struct super_block *sb = pde_data(file_inode(seq->file));
2992         ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2993         int i;
2994         int err, buddy_loaded = 0;
2995         struct ext4_buddy e4b;
2996         struct ext4_group_info *grinfo;
2997         unsigned char blocksize_bits = min_t(unsigned char,
2998                                              sb->s_blocksize_bits,
2999                                              EXT4_MAX_BLOCK_LOG_SIZE);
3000         struct sg {
3001                 struct ext4_group_info info;
3002                 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
3003         } sg;
3004
3005         group--;
3006         if (group == 0)
3007                 seq_puts(seq, "#group: free  frags first ["
3008                               " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
3009                               " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
3010
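        /*
         * Bytes to snapshot below: the ext4_group_info header plus the
         * bb_counters[] slots actually used at this block size.
         */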
3011         i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
3012                 sizeof(struct ext4_group_info);
3013
3014         grinfo = ext4_get_group_info(sb, group);
3015         if (!grinfo)
3016                 return 0;
3017         /* Load the group info in memory only if not already loaded. */
3018         if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
3019                 err = ext4_mb_load_buddy(sb, group, &e4b);
3020                 if (err) {
3021                         seq_printf(seq, "#%-5u: I/O error\n", group);
3022                         return 0;
3023                 }
3024                 buddy_loaded = 1;
3025         }
3026
3027         memcpy(&sg, grinfo, i);
3028
3029         if (buddy_loaded)
3030                 ext4_mb_unload_buddy(&e4b);
3031
3032         seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
3033                         sg.info.bb_fragments, sg.info.bb_first_free);
3034         for (i = 0; i <= 13; i++)
3035                 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
3036                                 sg.info.bb_counters[i] : 0);
3037         seq_puts(seq, " ]\n");
3038
3039         return 0;
3040 }
3041
3042 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
3043 {
3044 }
3045
3046 const struct seq_operations ext4_mb_seq_groups_ops = {
3047         .start  = ext4_mb_seq_groups_start,
3048         .next   = ext4_mb_seq_groups_next,
3049         .stop   = ext4_mb_seq_groups_stop,
3050         .show   = ext4_mb_seq_groups_show,
3051 };
3052
3053 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3054 {
3055         struct super_block *sb = seq->private;
3056         struct ext4_sb_info *sbi = EXT4_SB(sb);
3057
3058         seq_puts(seq, "mballoc:\n");
3059         if (!sbi->s_mb_stats) {
3060                 seq_puts(seq, "\tmb stats collection turned off.\n");
3061                 seq_puts(
3062                         seq,
3063                         "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
3064                 return 0;
3065         }
3066         seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
3067         seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
3068
3069         seq_printf(seq, "\tgroups_scanned: %u\n",
3070                    atomic_read(&sbi->s_bal_groups_scanned));
3071
3072         /* CR_POWER2_ALIGNED stats */
3073         seq_puts(seq, "\tcr_p2_aligned_stats:\n");
3074         seq_printf(seq, "\t\thits: %llu\n",
3075                    atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
3076         seq_printf(
3077                 seq, "\t\tgroups_considered: %llu\n",
3078                 atomic64_read(
3079                         &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
3080         seq_printf(seq, "\t\textents_scanned: %u\n",
3081                    atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
3082         seq_printf(seq, "\t\tuseless_loops: %llu\n",
3083                    atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
3084         seq_printf(seq, "\t\tbad_suggestions: %u\n",
3085                    atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
3086
3087         /* CR_GOAL_LEN_FAST stats */
3088         seq_puts(seq, "\tcr_goal_fast_stats:\n");
3089         seq_printf(seq, "\t\thits: %llu\n",
3090                    atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
3091         seq_printf(seq, "\t\tgroups_considered: %llu\n",
3092                    atomic64_read(
3093                            &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
3094         seq_printf(seq, "\t\textents_scanned: %u\n",
3095                    atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
3096         seq_printf(seq, "\t\tuseless_loops: %llu\n",
3097                    atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
3098         seq_printf(seq, "\t\tbad_suggestions: %u\n",
3099                    atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
3100
3101         /* CR_BEST_AVAIL_LEN stats */
3102         seq_puts(seq, "\tcr_best_avail_stats:\n");
3103         seq_printf(seq, "\t\thits: %llu\n",
3104                    atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
3105         seq_printf(
3106                 seq, "\t\tgroups_considered: %llu\n",
3107                 atomic64_read(
3108                         &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
3109         seq_printf(seq, "\t\textents_scanned: %u\n",
3110                    atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
3111         seq_printf(seq, "\t\tuseless_loops: %llu\n",
3112                    atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
3113         seq_printf(seq, "\t\tbad_suggestions: %u\n",
3114                    atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
3115
3116         /* CR_GOAL_LEN_SLOW stats */
3117         seq_puts(seq, "\tcr_goal_slow_stats:\n");
3118         seq_printf(seq, "\t\thits: %llu\n",
3119                    atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
3120         seq_printf(seq, "\t\tgroups_considered: %llu\n",
3121                    atomic64_read(
3122                            &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
3123         seq_printf(seq, "\t\textents_scanned: %u\n",
3124                    atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
3125         seq_printf(seq, "\t\tuseless_loops: %llu\n",
3126                    atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
3127
3128         /* CR_ANY_FREE stats */
3129         seq_puts(seq, "\tcr_any_free_stats:\n");
3130         seq_printf(seq, "\t\thits: %llu\n",
3131                    atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
3132         seq_printf(
3133                 seq, "\t\tgroups_considered: %llu\n",
3134                 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
3135         seq_printf(seq, "\t\textents_scanned: %u\n",
3136                    atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
3137         seq_printf(seq, "\t\tuseless_loops: %llu\n",
3138                    atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
3139
3140         /* Aggregates */
3141         seq_printf(seq, "\textents_scanned: %u\n",
3142                    atomic_read(&sbi->s_bal_ex_scanned));
3143         seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3144         seq_printf(seq, "\t\tlen_goal_hits: %u\n",
3145                    atomic_read(&sbi->s_bal_len_goals));
3146         seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3147         seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3148         seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3149         seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3150                    atomic_read(&sbi->s_mb_buddies_generated),
3151                    ext4_get_groups_count(sb));
3152         seq_printf(seq, "\tbuddies_time_used: %llu\n",
3153                    atomic64_read(&sbi->s_mb_generation_time));
3154         seq_printf(seq, "\tpreallocated: %u\n",
3155                    atomic_read(&sbi->s_mb_preallocated));
3156         seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
3157         return 0;
3158 }
3159
3160 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
3162 {
3163         struct super_block *sb = pde_data(file_inode(seq->file));
3164         unsigned long position;
3165
3166         if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3167                 return NULL;
3168         position = *pos + 1;
3169         return (void *) ((unsigned long) position);
3170 }
3171
3172 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3173 {
3174         struct super_block *sb = pde_data(file_inode(seq->file));
3175         unsigned long position;
3176
3177         ++*pos;
3178         if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3179                 return NULL;
3180         position = *pos + 1;
3181         return (void *) ((unsigned long) position);
3182 }
3183
3184 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3185 {
3186         struct super_block *sb = pde_data(file_inode(seq->file));
3187         struct ext4_sb_info *sbi = EXT4_SB(sb);
3188         unsigned long position = ((unsigned long) v);
3189         struct ext4_group_info *grp;
3190         unsigned int count;
3191
3192         position--;
3193         if (position >= MB_NUM_ORDERS(sb)) {
3194                 position -= MB_NUM_ORDERS(sb);
3195                 if (position == 0)
3196                         seq_puts(seq, "avg_fragment_size_lists:\n");
3197
3198                 count = 0;
3199                 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3200                 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3201                                     bb_avg_fragment_size_node)
3202                         count++;
3203                 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3204                 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3205                                         (unsigned int)position, count);
3206                 return 0;
3207         }
3208
3209         if (position == 0) {
3210                 seq_printf(seq, "optimize_scan: %d\n",
3211                            test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3212                 seq_puts(seq, "max_free_order_lists:\n");
3213         }
3214         count = 0;
3215         read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3216         list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3217                             bb_largest_free_order_node)
3218                 count++;
3219         read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3220         seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3221                    (unsigned int)position, count);
3222
3223         return 0;
3224 }
3225
3226 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3227 {
3228 }
3229
3230 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3231         .start  = ext4_mb_seq_structs_summary_start,
3232         .next   = ext4_mb_seq_structs_summary_next,
3233         .stop   = ext4_mb_seq_structs_summary_stop,
3234         .show   = ext4_mb_seq_structs_summary_show,
3235 };
3236
3237 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3238 {
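        /*
         * One slab cache per supported block size; e.g. (illustrative)
         * 4k blocks have blocksize_bits = 12 and, with
         * EXT4_MIN_BLOCK_LOG_SIZE = 10, map to cache_index 2.
         */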
3239         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3240         struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3241
3242         BUG_ON(!cachep);
3243         return cachep;
3244 }
3245
3246 /*
3247  * Allocate the top-level s_group_info array for the specified number
3248  * of groups
3249  */
3250 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3251 {
3252         struct ext4_sb_info *sbi = EXT4_SB(sb);
3253         unsigned size;
3254         struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3255
3256         size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3257                 EXT4_DESC_PER_BLOCK_BITS(sb);
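        /*
         * Illustrative sizing: with 4k blocks and 32-byte descriptors,
         * EXT4_DESC_PER_BLOCK = 128, so a 1000-group filesystem needs
         * ceil(1000 / 128) = 8 top-level slots here.
         */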
3258         if (size <= sbi->s_group_info_size)
3259                 return 0;
3260
3261         size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3262         new_groupinfo = kvzalloc(size, GFP_KERNEL);
3263         if (!new_groupinfo) {
3264                 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3265                 return -ENOMEM;
3266         }
3267         rcu_read_lock();
3268         old_groupinfo = rcu_dereference(sbi->s_group_info);
3269         if (old_groupinfo)
3270                 memcpy(new_groupinfo, old_groupinfo,
3271                        sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3272         rcu_read_unlock();
3273         rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3274         sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3275         if (old_groupinfo)
3276                 ext4_kvfree_array_rcu(old_groupinfo);
3277         ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3278                    sbi->s_group_info_size);
3279         return 0;
3280 }
3281
3282 /* Create and initialize ext4_group_info data for the given group. */
3283 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3284                           struct ext4_group_desc *desc)
3285 {
3286         int i;
3287         int metalen = 0;
3288         int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3289         struct ext4_sb_info *sbi = EXT4_SB(sb);
3290         struct ext4_group_info **meta_group_info;
3291         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3292
3293         /*
3294          * First check if this group is the first one in a block of
3295          * group descriptors. If so, we have to allocate a new table
3296          * of pointers to ext4_group_info structures.
3297          */
3298         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3299                 metalen = sizeof(*meta_group_info) <<
3300                         EXT4_DESC_PER_BLOCK_BITS(sb);
3301                 meta_group_info = kmalloc(metalen, GFP_NOFS);
3302                 if (meta_group_info == NULL) {
3303                         ext4_msg(sb, KERN_ERR, "can't allocate mem "
3304                                  "for a buddy group");
3305                         return -ENOMEM;
3306                 }
3307                 rcu_read_lock();
3308                 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3309                 rcu_read_unlock();
3310         }
3311
3312         meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3313         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
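        /* Two-level lookup: @idx picks the per-descriptor-block table
         * allocated above, @i is the slot within that table. */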
3314
3315         meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3316         if (meta_group_info[i] == NULL) {
3317                 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3318                 goto exit_group_info;
3319         }
3320         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3321                 &(meta_group_info[i]->bb_state));
3322
3323         /*
3324          * initialize bb_free to be able to skip
3325          * empty groups without initialization
3326          */
3327         if (ext4_has_group_desc_csum(sb) &&
3328             (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3329                 meta_group_info[i]->bb_free =
3330                         ext4_free_clusters_after_init(sb, group, desc);
3331         } else {
3332                 meta_group_info[i]->bb_free =
3333                         ext4_free_group_clusters(sb, desc);
3334         }
3335
3336         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3337         init_rwsem(&meta_group_info[i]->alloc_sem);
3338         meta_group_info[i]->bb_free_root = RB_ROOT;
3339         INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3340         INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3341         meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3342         meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3343         meta_group_info[i]->bb_group = group;
3344
3345         mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3346         return 0;
3347
3348 exit_group_info:
3349         /* If a meta_group_info table has been allocated, release it now */
3350         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3351                 struct ext4_group_info ***group_info;
3352
3353                 rcu_read_lock();
3354                 group_info = rcu_dereference(sbi->s_group_info);
3355                 kfree(group_info[idx]);
3356                 group_info[idx] = NULL;
3357                 rcu_read_unlock();
3358         }
3359         return -ENOMEM;
3360 } /* ext4_mb_add_groupinfo */
3361
3362 static int ext4_mb_init_backend(struct super_block *sb)
3363 {
3364         ext4_group_t ngroups = ext4_get_groups_count(sb);
3365         ext4_group_t i;
3366         struct ext4_sb_info *sbi = EXT4_SB(sb);
3367         int err;
3368         struct ext4_group_desc *desc;
3369         struct ext4_group_info ***group_info;
3370         struct kmem_cache *cachep;
3371
3372         err = ext4_mb_alloc_groupinfo(sb, ngroups);
3373         if (err)
3374                 return err;
3375
3376         sbi->s_buddy_cache = new_inode(sb);
3377         if (sbi->s_buddy_cache == NULL) {
3378                 ext4_msg(sb, KERN_ERR, "can't get new inode");
3379                 goto err_freesgi;
3380         }
3381         /* To avoid potentially colliding with a valid on-disk inode number,
3382          * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3383          * not in the inode hash, so it should never be found by iget(), but
3384          * this will avoid confusion if it ever shows up during debugging. */
3385         sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3386         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3387         for (i = 0; i < ngroups; i++) {
3388                 cond_resched();
3389                 desc = ext4_get_group_desc(sb, i, NULL);
3390                 if (desc == NULL) {
3391                         ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3392                         goto err_freebuddy;
3393                 }
3394                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3395                         goto err_freebuddy;
3396         }
3397
3398         if (ext4_has_feature_flex_bg(sb)) {
3399                 /* a single flex group is supposed to be read by a single IO.
3400                  * s_mb_prefetch is an unsigned int, so a shift of 32 or more
3401                  * would overflow it; reject such s_log_groups_per_flex values.
3402                  */
3403                 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3404                         ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3405                         goto err_freebuddy;
3406                 }
3407                 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3408                         BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3409                 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3410         } else {
3411                 sbi->s_mb_prefetch = 32;
3412         }
3413         if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3414                 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3415         /* how many real IOs to prefetch within a single allocation at cr=0.
3416          * Given that cr=0 is a CPU-related optimization we shouldn't try to
3417          * load too many groups; at some point we should start to use what
3418          * we've got in memory.
3419          * With an average random access time of 5ms, it'd take a second to
3420          * get 200 groups (* N with flex_bg), so let's make this limit 4.
3421          */
3422         sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3423         if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3424                 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3425
3426         return 0;
3427
3428 err_freebuddy:
3429         cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3430         while (i-- > 0) {
3431                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3432
3433                 if (grp)
3434                         kmem_cache_free(cachep, grp);
3435         }
3436         i = sbi->s_group_info_size;
3437         rcu_read_lock();
3438         group_info = rcu_dereference(sbi->s_group_info);
3439         while (i-- > 0)
3440                 kfree(group_info[i]);
3441         rcu_read_unlock();
3442         iput(sbi->s_buddy_cache);
3443 err_freesgi:
3444         rcu_read_lock();
3445         kvfree(rcu_dereference(sbi->s_group_info));
3446         rcu_read_unlock();
3447         return -ENOMEM;
3448 }
3449
3450 static void ext4_groupinfo_destroy_slabs(void)
3451 {
3452         int i;
3453
3454         for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3455                 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3456                 ext4_groupinfo_caches[i] = NULL;
3457         }
3458 }
3459
3460 static int ext4_groupinfo_create_slab(size_t size)
3461 {
3462         static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3463         int slab_size;
3464         int blocksize_bits = order_base_2(size);
3465         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3466         struct kmem_cache *cachep;
3467
3468         if (cache_index >= NR_GRPINFO_CACHES)
3469                 return -EINVAL;
3470
3471         if (unlikely(cache_index < 0))
3472                 cache_index = 0;
3473
3474         mutex_lock(&ext4_grpinfo_slab_create_mutex);
3475         if (ext4_groupinfo_caches[cache_index]) {
3476                 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3477                 return 0;       /* Already created */
3478         }
3479
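        /*
         * Size each object to end right after its bb_counters[]: one
         * counter per buddy order, blocksize_bits + 2 of them in total.
         */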
3480         slab_size = offsetof(struct ext4_group_info,
3481                                 bb_counters[blocksize_bits + 2]);
3482
3483         cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3484                                         slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3485                                         NULL);
3486
3487         ext4_groupinfo_caches[cache_index] = cachep;
3488
3489         mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3490         if (!cachep) {
3491                 printk(KERN_EMERG
3492                        "EXT4-fs: no memory for groupinfo slab cache\n");
3493                 return -ENOMEM;
3494         }
3495
3496         return 0;
3497 }
3498
3499 static void ext4_discard_work(struct work_struct *work)
3500 {
3501         struct ext4_sb_info *sbi = container_of(work,
3502                         struct ext4_sb_info, s_discard_work);
3503         struct super_block *sb = sbi->s_sb;
3504         struct ext4_free_data *fd, *nfd;
3505         struct ext4_buddy e4b;
3506         LIST_HEAD(discard_list);
3507         ext4_group_t grp, load_grp;
3508         int err = 0;
3509
3510         spin_lock(&sbi->s_md_lock);
3511         list_splice_init(&sbi->s_discard_list, &discard_list);
3512         spin_unlock(&sbi->s_md_lock);
3513
3514         load_grp = UINT_MAX;
3515         list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3516                 /*
3517                  * If the filesystem is unmounting, out of memory, or
3518                  * suffering from a lack of space, give up on the discard.
3519                  */
3520                 if ((sb->s_flags & SB_ACTIVE) && !err &&
3521                     !atomic_read(&sbi->s_retry_alloc_pending)) {
3522                         grp = fd->efd_group;
3523                         if (grp != load_grp) {
3524                                 if (load_grp != UINT_MAX)
3525                                         ext4_mb_unload_buddy(&e4b);
3526
3527                                 err = ext4_mb_load_buddy(sb, grp, &e4b);
3528                                 if (err) {
3529                                         kmem_cache_free(ext4_free_data_cachep, fd);
3530                                         load_grp = UINT_MAX;
3531                                         continue;
3532                                 } else {
3533                                         load_grp = grp;
3534                                 }
3535                         }
3536
3537                         ext4_lock_group(sb, grp);
3538                         ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3539                                                 fd->efd_start_cluster + fd->efd_count - 1, 1);
3540                         ext4_unlock_group(sb, grp);
3541                 }
3542                 kmem_cache_free(ext4_free_data_cachep, fd);
3543         }
3544
3545         if (load_grp != UINT_MAX)
3546                 ext4_mb_unload_buddy(&e4b);
3547 }
3548
3549 int ext4_mb_init(struct super_block *sb)
3550 {
3551         struct ext4_sb_info *sbi = EXT4_SB(sb);
3552         unsigned i, j;
3553         unsigned offset, offset_incr;
3554         unsigned max;
3555         int ret;
3556
3557         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3558
3559         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3560         if (sbi->s_mb_offsets == NULL) {
3561                 ret = -ENOMEM;
3562                 goto out;
3563         }
3564
3565         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3566         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3567         if (sbi->s_mb_maxs == NULL) {
3568                 ret = -ENOMEM;
3569                 goto out;
3570         }
3571
3572         ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3573         if (ret < 0)
3574                 goto out;
3575
3576         /* order 0 is regular bitmap */
3577         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3578         sbi->s_mb_offsets[0] = 0;
3579
3580         i = 1;
3581         offset = 0;
3582         offset_incr = 1 << (sb->s_blocksize_bits - 1);
3583         max = sb->s_blocksize << 2;
3584         do {
3585                 sbi->s_mb_offsets[i] = offset;
3586                 sbi->s_mb_maxs[i] = max;
3587                 offset += offset_incr;
3588                 offset_incr = offset_incr >> 1;
3589                 max = max >> 1;
3590                 i++;
3591         } while (i < MB_NUM_ORDERS(sb));
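        /*
         * Illustrative values for 4k blocks (blocksize_bits = 12):
         * maxs[0] = 32768 bits (the on-disk bitmap itself); within the
         * buddy block, offsets[1] = 0, offsets[2] = 2048, offsets[3] =
         * 3072, ... with maxs[1] = 16384, maxs[2] = 8192, halving at
         * each higher order.
         */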
3592
3593         sbi->s_mb_avg_fragment_size =
3594                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3595                         GFP_KERNEL);
3596         if (!sbi->s_mb_avg_fragment_size) {
3597                 ret = -ENOMEM;
3598                 goto out;
3599         }
3600         sbi->s_mb_avg_fragment_size_locks =
3601                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3602                         GFP_KERNEL);
3603         if (!sbi->s_mb_avg_fragment_size_locks) {
3604                 ret = -ENOMEM;
3605                 goto out;
3606         }
3607         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3608                 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3609                 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3610         }
3611         sbi->s_mb_largest_free_orders =
3612                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3613                         GFP_KERNEL);
3614         if (!sbi->s_mb_largest_free_orders) {
3615                 ret = -ENOMEM;
3616                 goto out;
3617         }
3618         sbi->s_mb_largest_free_orders_locks =
3619                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3620                         GFP_KERNEL);
3621         if (!sbi->s_mb_largest_free_orders_locks) {
3622                 ret = -ENOMEM;
3623                 goto out;
3624         }
3625         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3626                 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3627                 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3628         }
3629
3630         spin_lock_init(&sbi->s_md_lock);
3631         sbi->s_mb_free_pending = 0;
3632         INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3633         INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3634         INIT_LIST_HEAD(&sbi->s_discard_list);
3635         INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3636         atomic_set(&sbi->s_retry_alloc_pending, 0);
3637
3638         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3639         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3640         sbi->s_mb_stats = MB_DEFAULT_STATS;
3641         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3642         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3643         sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3644
3645         /*
3646          * The default group preallocation is 512, which for 4k block
3647          * sizes translates to 2 megabytes.  However for bigalloc file
3648          * systems, this is probably too big (i.e., if the cluster size
3649          * is 1 megabyte, then group preallocation size becomes half a
3650          * gigabyte!).  As a default, we will keep a two megabyte
3651          * group prealloc size for cluster sizes up to 64k, and after
3652          * that, we will force a minimum group preallocation size of
3653          * 32 clusters.  This translates to 8 megs when the cluster
3654          * size is 256k, and 32 megs when the cluster size is 1 meg,
3655          * which seems reasonable as a default.
3656          */
3657         sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3658                                        sbi->s_cluster_bits, 32);
3659         /*
3660          * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3661          * to the lowest multiple of s_stripe which is bigger than
3662          * the s_mb_group_prealloc as determined above. We want
3663          * the preallocation size to be an exact multiple of the
3664          * RAID stripe size so that preallocations don't fragment
3665          * the stripes.
3666          */
3667         if (sbi->s_stripe > 1) {
3668                 sbi->s_mb_group_prealloc = roundup(
3669                         sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
3670         }
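        /*
         * Illustrative example: if s_stripe covers 16 clusters, a
         * computed prealloc of 100 clusters is rounded up to 112 so
         * that preallocations always cover whole stripes.
         */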
3671
3672         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3673         if (sbi->s_locality_groups == NULL) {
3674                 ret = -ENOMEM;
3675                 goto out;
3676         }
3677         for_each_possible_cpu(i) {
3678                 struct ext4_locality_group *lg;
3679                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3680                 mutex_init(&lg->lg_mutex);
3681                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3682                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3683                 spin_lock_init(&lg->lg_prealloc_lock);
3684         }
3685
3686         if (bdev_nonrot(sb->s_bdev))
3687                 sbi->s_mb_max_linear_groups = 0;
3688         else
3689                 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3690         /* init file for buddy data */
3691         ret = ext4_mb_init_backend(sb);
3692         if (ret != 0)
3693                 goto out_free_locality_groups;
3694
3695         return 0;
3696
3697 out_free_locality_groups:
3698         free_percpu(sbi->s_locality_groups);
3699         sbi->s_locality_groups = NULL;
3700 out:
3701         kfree(sbi->s_mb_avg_fragment_size);
3702         kfree(sbi->s_mb_avg_fragment_size_locks);
3703         kfree(sbi->s_mb_largest_free_orders);
3704         kfree(sbi->s_mb_largest_free_orders_locks);
3705         kfree(sbi->s_mb_offsets);
3706         sbi->s_mb_offsets = NULL;
3707         kfree(sbi->s_mb_maxs);
3708         sbi->s_mb_maxs = NULL;
3709         return ret;
3710 }
3711
3712 /* must be called with the ext4 group lock held */
3713 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3714 {
3715         struct ext4_prealloc_space *pa;
3716         struct list_head *cur, *tmp;
3717         int count = 0;
3718
3719         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3720                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3721                 list_del(&pa->pa_group_list);
3722                 count++;
3723                 kmem_cache_free(ext4_pspace_cachep, pa);
3724         }
3725         return count;
3726 }
3727
3728 int ext4_mb_release(struct super_block *sb)
3729 {
3730         ext4_group_t ngroups = ext4_get_groups_count(sb);
3731         ext4_group_t i;
3732         int num_meta_group_infos;
3733         struct ext4_group_info *grinfo, ***group_info;
3734         struct ext4_sb_info *sbi = EXT4_SB(sb);
3735         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3736         int count;
3737
3738         if (test_opt(sb, DISCARD)) {
3739                 /*
3740                  * wait for the discard work to drain all of the ext4_free_data entries
3741                  */
3742                 flush_work(&sbi->s_discard_work);
3743                 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3744         }
3745
3746         if (sbi->s_group_info) {
3747                 for (i = 0; i < ngroups; i++) {
3748                         cond_resched();
3749                         grinfo = ext4_get_group_info(sb, i);
3750                         if (!grinfo)
3751                                 continue;
3752                         mb_group_bb_bitmap_free(grinfo);
3753                         ext4_lock_group(sb, i);
3754                         count = ext4_mb_cleanup_pa(grinfo);
3755                         if (count)
3756                                 mb_debug(sb, "mballoc: %d PAs left\n",
3757                                          count);
3758                         ext4_unlock_group(sb, i);
3759                         kmem_cache_free(cachep, grinfo);
3760                 }
3761                 num_meta_group_infos = (ngroups +
3762                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3763                         EXT4_DESC_PER_BLOCK_BITS(sb);
3764                 rcu_read_lock();
3765                 group_info = rcu_dereference(sbi->s_group_info);
3766                 for (i = 0; i < num_meta_group_infos; i++)
3767                         kfree(group_info[i]);
3768                 kvfree(group_info);
3769                 rcu_read_unlock();
3770         }
3771         kfree(sbi->s_mb_avg_fragment_size);
3772         kfree(sbi->s_mb_avg_fragment_size_locks);
3773         kfree(sbi->s_mb_largest_free_orders);
3774         kfree(sbi->s_mb_largest_free_orders_locks);
3775         kfree(sbi->s_mb_offsets);
3776         kfree(sbi->s_mb_maxs);
3777         iput(sbi->s_buddy_cache);
3778         if (sbi->s_mb_stats) {
3779                 ext4_msg(sb, KERN_INFO,
3780                        "mballoc: %u blocks %u reqs (%u success)",
3781                                 atomic_read(&sbi->s_bal_allocated),
3782                                 atomic_read(&sbi->s_bal_reqs),
3783                                 atomic_read(&sbi->s_bal_success));
3784                 ext4_msg(sb, KERN_INFO,
3785                       "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3786                                 "%u 2^N hits, %u breaks, %u lost",
3787                                 atomic_read(&sbi->s_bal_ex_scanned),
3788                                 atomic_read(&sbi->s_bal_groups_scanned),
3789                                 atomic_read(&sbi->s_bal_goals),
3790                                 atomic_read(&sbi->s_bal_2orders),
3791                                 atomic_read(&sbi->s_bal_breaks),
3792                                 atomic_read(&sbi->s_mb_lost_chunks));
3793                 ext4_msg(sb, KERN_INFO,
3794                        "mballoc: %u generated and it took %llu",
3795                                 atomic_read(&sbi->s_mb_buddies_generated),
3796                                 atomic64_read(&sbi->s_mb_generation_time));
3797                 ext4_msg(sb, KERN_INFO,
3798                        "mballoc: %u preallocated, %u discarded",
3799                                 atomic_read(&sbi->s_mb_preallocated),
3800                                 atomic_read(&sbi->s_mb_discarded));
3801         }
3802
3803         free_percpu(sbi->s_locality_groups);
3804
3805         return 0;
3806 }
3807
3808 static inline int ext4_issue_discard(struct super_block *sb,
3809                 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3810                 struct bio **biop)
3811 {
3812         ext4_fsblk_t discard_block;
3813
3814         discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3815                          ext4_group_first_block_no(sb, block_group));
3816         count = EXT4_C2B(EXT4_SB(sb), count);
3817         trace_ext4_discard_blocks(sb,
3818                         (unsigned long long) discard_block, count);
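        /*
         * The shifts below convert filesystem blocks to the 512-byte
         * sectors __blkdev_issue_discard() expects, e.g. a left shift
         * by 3 for 4k blocks.
         */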
3819         if (biop) {
3820                 return __blkdev_issue_discard(sb->s_bdev,
3821                         (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3822                         (sector_t)count << (sb->s_blocksize_bits - 9),
3823                         GFP_NOFS, biop);
3824         } else
3825                 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3826 }
3827
3828 static void ext4_free_data_in_buddy(struct super_block *sb,
3829                                     struct ext4_free_data *entry)
3830 {
3831         struct ext4_buddy e4b;
3832         struct ext4_group_info *db;
3833         int err, count = 0;
3834
3835         mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3836                  entry->efd_count, entry->efd_group, entry);
3837
3838         err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3839         /* we expect to find the existing buddy because it's pinned */
3840         BUG_ON(err != 0);
3841
3842         spin_lock(&EXT4_SB(sb)->s_md_lock);
3843         EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3844         spin_unlock(&EXT4_SB(sb)->s_md_lock);
3845
3846         db = e4b.bd_info;
3847         /* there are blocks to put in buddy to make them really free */
3848         count += entry->efd_count;
3849         ext4_lock_group(sb, entry->efd_group);
3850         /* Take it out of per group rb tree */
3851         rb_erase(&entry->efd_node, &(db->bb_free_root));
3852         mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3853
3854         /*
3855          * Clear the trimmed flag for the group so that the next
3856          * ext4_trim_fs can trim it.
3857          * If the volume is mounted with -o discard, online discard
3858          * is supported and the free blocks will be trimmed online.
3859          */
3860         if (!test_opt(sb, DISCARD))
3861                 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3862
3863         if (!db->bb_free_root.rb_node) {
3864                         /* No more items in the per group rb tree;
3865                          * balance the refcounts from ext4_mb_free_metadata().
3866                  */
3867                 put_page(e4b.bd_buddy_page);
3868                 put_page(e4b.bd_bitmap_page);
3869         }
3870         ext4_unlock_group(sb, entry->efd_group);
3871         ext4_mb_unload_buddy(&e4b);
3872
3873         mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3874 }
3875
3876 /*
3877  * This function is called by the jbd2 layer once the commit has finished,
3878  * so we know we can free the blocks that were released with that commit.
3879  */
3880 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3881 {
3882         struct ext4_sb_info *sbi = EXT4_SB(sb);
3883         struct ext4_free_data *entry, *tmp;
3884         LIST_HEAD(freed_data_list);
3885         struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
3886         bool wake;
3887
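        /*
         * Freed extents ping-pong between two lists indexed by the
         * committing transaction's tid parity; grab the list that the
         * just-committed transaction was filling.
         */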
3888         list_replace_init(s_freed_head, &freed_data_list);
3889
3890         list_for_each_entry(entry, &freed_data_list, efd_list)
3891                 ext4_free_data_in_buddy(sb, entry);
3892
3893         if (test_opt(sb, DISCARD)) {
3894                 spin_lock(&sbi->s_md_lock);
3895                 wake = list_empty(&sbi->s_discard_list);
3896                 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3897                 spin_unlock(&sbi->s_md_lock);
3898                 if (wake)
3899                         queue_work(system_unbound_wq, &sbi->s_discard_work);
3900         } else {
3901                 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3902                         kmem_cache_free(ext4_free_data_cachep, entry);
3903         }
3904 }
3905
3906 int __init ext4_init_mballoc(void)
3907 {
3908         ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3909                                         SLAB_RECLAIM_ACCOUNT);
3910         if (ext4_pspace_cachep == NULL)
3911                 goto out;
3912
3913         ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3914                                     SLAB_RECLAIM_ACCOUNT);
3915         if (ext4_ac_cachep == NULL)
3916                 goto out_pa_free;
3917
3918         ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3919                                            SLAB_RECLAIM_ACCOUNT);
3920         if (ext4_free_data_cachep == NULL)
3921                 goto out_ac_free;
3922
3923         return 0;
3924
3925 out_ac_free:
3926         kmem_cache_destroy(ext4_ac_cachep);
3927 out_pa_free:
3928         kmem_cache_destroy(ext4_pspace_cachep);
3929 out:
3930         return -ENOMEM;
3931 }
3932
3933 void ext4_exit_mballoc(void)
3934 {
3935         /*
3936          * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3937          * before destroying the slab cache.
3938          */
3939         rcu_barrier();
3940         kmem_cache_destroy(ext4_pspace_cachep);
3941         kmem_cache_destroy(ext4_ac_cachep);
3942         kmem_cache_destroy(ext4_free_data_cachep);
3943         ext4_groupinfo_destroy_slabs();
3944 }
3945
3946 #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
3947 #define EXT4_MB_SYNC_UPDATE 0x0002
3948 static int
3949 ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
3950                      ext4_group_t group, ext4_grpblk_t blkoff,
3951                      ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
3952 {
3953         struct ext4_sb_info *sbi = EXT4_SB(sb);
3954         struct buffer_head *bitmap_bh = NULL;
3955         struct ext4_group_desc *gdp;
3956         struct buffer_head *gdp_bh;
3957         int err;
3958         unsigned int i, already, changed = len;
3959
3960         KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context,
3961                                    handle, sb, state, group, blkoff, len,
3962                                    flags, ret_changed);
3963
3964         if (ret_changed)
3965                 *ret_changed = 0;
3966         bitmap_bh = ext4_read_block_bitmap(sb, group);
3967         if (IS_ERR(bitmap_bh))
3968                 return PTR_ERR(bitmap_bh);
3969
3970         if (handle) {
3971                 BUFFER_TRACE(bitmap_bh, "getting write access");
3972                 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3973                                                     EXT4_JTR_NONE);
3974                 if (err)
3975                         goto out_err;
3976         }
3977
3978         err = -EIO;
3979         gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3980         if (!gdp)
3981                 goto out_err;
3982
3983         if (handle) {
3984                 BUFFER_TRACE(gdp_bh, "get_write_access");
3985                 err = ext4_journal_get_write_access(handle, sb, gdp_bh,
3986                                                     EXT4_JTR_NONE);
3987                 if (err)
3988                         goto out_err;
3989         }
3990
3991         ext4_lock_group(sb, group);
3992         if (ext4_has_group_desc_csum(sb) &&
3993             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3994                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3995                 ext4_free_group_clusters_set(sb, gdp,
3996                         ext4_free_clusters_after_init(sb, group, gdp));
3997         }
3998
3999         if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
4000                 already = 0;
4001                 for (i = 0; i < len; i++)
4002                         if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4003                                         state)
4004                                 already++;
4005                 changed = len - already;
4006         }
4007
4008         if (state) {
4009                 mb_set_bits(bitmap_bh->b_data, blkoff, len);
4010                 ext4_free_group_clusters_set(sb, gdp,
4011                         ext4_free_group_clusters(sb, gdp) - changed);
4012         } else {
4013                 mb_clear_bits(bitmap_bh->b_data, blkoff, len);
4014                 ext4_free_group_clusters_set(sb, gdp,
4015                         ext4_free_group_clusters(sb, gdp) + changed);
4016         }
4017
4018         ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4019         ext4_group_desc_csum_set(sb, group, gdp);
4020         ext4_unlock_group(sb, group);
4021         if (ret_changed)
4022                 *ret_changed = changed;
4023
4024         if (sbi->s_log_groups_per_flex) {
4025                 ext4_group_t flex_group = ext4_flex_group(sbi, group);
4026                 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4027                                            s_flex_groups, flex_group);
4028
4029                 if (state)
4030                         atomic64_sub(changed, &fg->free_clusters);
4031                 else
4032                         atomic64_add(changed, &fg->free_clusters);
4033         }
4034
4035         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4036         if (err)
4037                 goto out_err;
4038         err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
4039         if (err)
4040                 goto out_err;
4041
4042         if (flags & EXT4_MB_SYNC_UPDATE) {
4043                 sync_dirty_buffer(bitmap_bh);
4044                 sync_dirty_buffer(gdp_bh);
4045         }
4046
4047 out_err:
4048         brelse(bitmap_bh);
4049         return err;
4050 }
4051
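/*
 * Illustrative sketch (not part of the kernel source): how a caller
 * might use ext4_mb_mark_context() above to mark a 16-cluster range in
 * group 5 as in use while asking how many bits actually flipped.  The
 * group/offset/length values are hypothetical; handle and sb come from
 * the caller's context.
 *
 *	ext4_grpblk_t changed;
 *	int err;
 *
 *	err = ext4_mb_mark_context(handle, sb, true, 5, 0, 16,
 *				   EXT4_MB_BITMAP_MARKED_CHECK, &changed);
 *	if (!err && changed < 16)
 *		pr_debug("%d bits were already set\n", 16 - changed);
 */
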
4052 /*
4053  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
4054  * Returns 0 on success or an error code.
4055  */
4056 static noinline_for_stack int
4057 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
4058                                 handle_t *handle, unsigned int reserv_clstrs)
4059 {
4060         struct ext4_group_desc *gdp;
4061         struct ext4_sb_info *sbi;
4062         struct super_block *sb;
4063         ext4_fsblk_t block;
4064         int err, len;
4065         int flags = 0;
4066         ext4_grpblk_t changed;
4067
4068         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4069         BUG_ON(ac->ac_b_ex.fe_len <= 0);
4070
4071         sb = ac->ac_sb;
4072         sbi = EXT4_SB(sb);
4073
4074         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
4075         if (!gdp)
4076                 return -EIO;
4077         ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4078                         ext4_free_group_clusters(sb, gdp));
4079
4080         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4081         len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4082         if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4083                 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4084                            "fs metadata", block, block+len);
4085                 /* File system is mounted not to panic on error:
4086                  * fix the bitmap, return EFSCORRUPTED,
4087                  * and accept that we leak some of the blocks here.
4088                  */
4089                 err = ext4_mb_mark_context(handle, sb, true,
4090                                            ac->ac_b_ex.fe_group,
4091                                            ac->ac_b_ex.fe_start,
4092                                            ac->ac_b_ex.fe_len,
4093                                            0, NULL);
4094                 if (!err)
4095                         err = -EFSCORRUPTED;
4096                 return err;
4097         }
4098
4099 #ifdef AGGRESSIVE_CHECK
4100         flags |= EXT4_MB_BITMAP_MARKED_CHECK;
4101 #endif
4102         err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
4103                                    ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
4104                                    flags, &changed);
4105
4106         if (err && changed == 0)
4107                 return err;
4108
4109 #ifdef AGGRESSIVE_CHECK
4110         BUG_ON(changed != ac->ac_b_ex.fe_len);
4111 #endif
4112         percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4113         /*
4114          * Now reduce the dirty block count also. Should not go negative
4115          */
4116         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4117                 /* release all the reserved blocks if non delalloc */
4118                 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4119                                    reserv_clstrs);
4120
4121         return err;
4122 }
4123
4124 /*
4125  * Idempotent helper for Ext4 fast commit replay path to set the state of
4126  * blocks in bitmaps and update counters.
4127  */
4128 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4129                      int len, bool state)
4130 {
4131         struct ext4_sb_info *sbi = EXT4_SB(sb);
4132         ext4_group_t group;
4133         ext4_grpblk_t blkoff;
4134         int err = 0;
4135         unsigned int clen, thisgrp_len;
4136
4137         while (len > 0) {
4138                 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4139
4140                 /*
4141                  * Check to see if we are freeing blocks across a group
4142                  * boundary.
4143                  * In case of flex_bg, (block, len) may span more than one
4144                  * group. In that case we need to get the metadata of each
4145                  * group in turn to work with, which is why we keep
4146                  * iterating in this loop.
4147                  */
4148                 thisgrp_len = min_t(unsigned int, (unsigned int)len,
4149                         EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4150                 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4151
4152                 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4153                         ext4_error(sb, "Marking blocks in system zone - "
4154                                    "Block = %llu, len = %u",
4155                                    block, thisgrp_len);
4156                         break;
4157                 }
4158
4159                 err = ext4_mb_mark_context(NULL, sb, state,
4160                                            group, blkoff, clen,
4161                                            EXT4_MB_BITMAP_MARKED_CHECK |
4162                                            EXT4_MB_SYNC_UPDATE,
4163                                            NULL);
4164                 if (err)
4165                         break;
4166
4167                 block += thisgrp_len;
4168                 len -= thisgrp_len;
4169                 BUG_ON(len < 0);
4170         }
4171 }
4172
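/*
 * Worked example (hypothetical geometry, 32768 blocks per group): a
 * replay of block = 32760, len = 16 takes two passes of the loop above:
 *
 *	pass 1: group 0, blkoff 32760, thisgrp_len = 8 (32768 - 32760)
 *	pass 2: group 1, blkoff 0,     thisgrp_len = 8 (remaining len)
 *
 * Each pass updates its group via ext4_mb_mark_context() with
 * EXT4_MB_SYNC_UPDATE so the replayed bitmaps reach disk synchronously.
 */
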
4173 /*
4174  * here we normalize the request for a locality group.
4175  * Group requests are normalized to s_mb_group_prealloc, which is set
4176  * from s_stripe if a stripe size was given via the mount option.
4177  * s_mb_group_prealloc can be configured via
4178  * /sys/fs/ext4/<partition>/mb_group_prealloc
4179  *
4180  * XXX: should we try to preallocate more than the group has now?
4181  */
4182 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4183 {
4184         struct super_block *sb = ac->ac_sb;
4185         struct ext4_locality_group *lg = ac->ac_lg;
4186
4187         BUG_ON(lg == NULL);
4188         ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4189         mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4190 }
4191
4192 /*
4193  * This function returns the next element to look at during an inode
4194  * PA rbtree walk. We assume that the inode PA rbtree lock
4195  * (ei->i_prealloc_lock) is held.
4196  *
4197  * new_start    The start of the range we want to compare
4198  * cur_start    The existing start that we are comparing against
4199  * node         The node of the rb_tree
4200  */
4201 static inline struct rb_node*
4202 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4203 {
4204         if (new_start < cur_start)
4205                 return node->rb_left;
4206         else
4207                 return node->rb_right;
4208 }
4209
4210 static inline void
4211 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4212                           ext4_lblk_t start, loff_t end)
4213 {
4214         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4215         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4216         struct ext4_prealloc_space *tmp_pa;
4217         ext4_lblk_t tmp_pa_start;
4218         loff_t tmp_pa_end;
4219         struct rb_node *iter;
4220
4221         read_lock(&ei->i_prealloc_lock);
4222         for (iter = ei->i_prealloc_node.rb_node; iter;
4223              iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4224                 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4225                                   pa_node.inode_node);
4226                 tmp_pa_start = tmp_pa->pa_lstart;
4227                 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4228
4229                 spin_lock(&tmp_pa->pa_lock);
4230                 if (tmp_pa->pa_deleted == 0)
4231                         BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4232                 spin_unlock(&tmp_pa->pa_lock);
4233         }
4234         read_unlock(&ei->i_prealloc_lock);
4235 }
4236
4237 /*
4238  * Given an allocation context "ac" and a range "start", "end", check
4239  * and adjust boundaries if the range overlaps with any of the existing
4240  * preallocations stored in the corresponding inode of the allocation context.
4241  *
4242  * Parameters:
4243  *      ac                      allocation context
4244  *      start                   start of the new range
4245  *      end                     end of the new range
4246  */
4247 static inline void
4248 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4249                           ext4_lblk_t *start, loff_t *end)
4250 {
4251         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4252         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4253         struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4254         struct rb_node *iter;
4255         ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4256         loff_t new_end, tmp_pa_end, left_pa_end = -1;
4257
4258         new_start = *start;
4259         new_end = *end;
4260
4261         /*
4262          * Adjust the normalized range so that it doesn't overlap with any
4263          * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
4264          * so it doesn't change underneath us.
4265          */
4266         read_lock(&ei->i_prealloc_lock);
4267
4268         /* Step 1: find any one immediate neighboring PA of the normalized range */
4269         for (iter = ei->i_prealloc_node.rb_node; iter;
4270              iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4271                                             tmp_pa_start, iter)) {
4272                 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4273                                   pa_node.inode_node);
4274                 tmp_pa_start = tmp_pa->pa_lstart;
4275                 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4276
4277                 /* PA must not overlap original request */
4278                 spin_lock(&tmp_pa->pa_lock);
4279                 if (tmp_pa->pa_deleted == 0)
4280                         BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4281                                  ac->ac_o_ex.fe_logical < tmp_pa_start));
4282                 spin_unlock(&tmp_pa->pa_lock);
4283         }
4284
4285         /*
4286          * Step 2: check if the found PA is left or right neighbor and
4287          * get the other neighbor
4288          */
4289         if (tmp_pa) {
4290                 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4291                         struct rb_node *tmp;
4292
4293                         left_pa = tmp_pa;
4294                         tmp = rb_next(&left_pa->pa_node.inode_node);
4295                         if (tmp) {
4296                                 right_pa = rb_entry(tmp,
4297                                                     struct ext4_prealloc_space,
4298                                                     pa_node.inode_node);
4299                         }
4300                 } else {
4301                         struct rb_node *tmp;
4302
4303                         right_pa = tmp_pa;
4304                         tmp = rb_prev(&right_pa->pa_node.inode_node);
4305                         if (tmp) {
4306                                 left_pa = rb_entry(tmp,
4307                                                    struct ext4_prealloc_space,
4308                                                    pa_node.inode_node);
4309                         }
4310                 }
4311         }
4312
4313         /* Step 3: get the non deleted neighbors */
4314         if (left_pa) {
4315                 for (iter = &left_pa->pa_node.inode_node;;
4316                      iter = rb_prev(iter)) {
4317                         if (!iter) {
4318                                 left_pa = NULL;
4319                                 break;
4320                         }
4321
4322                         tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4323                                           pa_node.inode_node);
4324                         left_pa = tmp_pa;
4325                         spin_lock(&tmp_pa->pa_lock);
4326                         if (tmp_pa->pa_deleted == 0) {
4327                                 spin_unlock(&tmp_pa->pa_lock);
4328                                 break;
4329                         }
4330                         spin_unlock(&tmp_pa->pa_lock);
4331                 }
4332         }
4333
4334         if (right_pa) {
4335                 for (iter = &right_pa->pa_node.inode_node;;
4336                      iter = rb_next(iter)) {
4337                         if (!iter) {
4338                                 right_pa = NULL;
4339                                 break;
4340                         }
4341
4342                         tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4343                                           pa_node.inode_node);
4344                         right_pa = tmp_pa;
4345                         spin_lock(&tmp_pa->pa_lock);
4346                         if (tmp_pa->pa_deleted == 0) {
4347                                 spin_unlock(&tmp_pa->pa_lock);
4348                                 break;
4349                         }
4350                         spin_unlock(&tmp_pa->pa_lock);
4351                 }
4352         }
4353
4354         if (left_pa) {
4355                 left_pa_end = pa_logical_end(sbi, left_pa);
4356                 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4357         }
4358
4359         if (right_pa) {
4360                 right_pa_start = right_pa->pa_lstart;
4361                 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4362         }
4363
4364         /* Step 4: trim our normalized range to not overlap with the neighbors */
4365         if (left_pa) {
4366                 if (left_pa_end > new_start)
4367                         new_start = left_pa_end;
4368         }
4369
4370         if (right_pa) {
4371                 if (right_pa_start < new_end)
4372                         new_end = right_pa_start;
4373         }
4374         read_unlock(&ei->i_prealloc_lock);
4375
4376         /* XXX: extra loop to check we really don't overlap preallocations */
4377         ext4_mb_pa_assert_overlap(ac, new_start, new_end);
4378
4379         *start = new_start;
4380         *end = new_end;
4381 }
4382
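/*
 * Worked example (hypothetical values): if the normalized range is
 * [100, 300), the nearest non-deleted left PA ends at 120 and the
 * nearest non-deleted right PA starts at 260, steps 3-4 above trim the
 * range to [120, 260). The original request still fits because neither
 * neighbor may overlap ac->ac_o_ex (asserted in step 1).
 */
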
4383 /*
4384  * Normalization means making request better in terms of
4385  * size and alignment
4386  */
4387 static noinline_for_stack void
4388 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4389                                 struct ext4_allocation_request *ar)
4390 {
4391         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4392         struct ext4_super_block *es = sbi->s_es;
4393         int bsbits, max;
4394         loff_t size, start_off, end;
4395         loff_t orig_size __maybe_unused;
4396         ext4_lblk_t start;
4397
4398         /* only normalize data requests; metadata requests
4399            do not need preallocation */
4400         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4401                 return;
4402
4403         /* sometimes the caller may want exact blocks */
4404         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4405                 return;
4406
4407         /* caller may indicate that preallocation isn't
4408          * required (it's a tail, for example) */
4409         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4410                 return;
4411
4412         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4413                 ext4_mb_normalize_group_request(ac);
4414                 return;
4415         }
4416
4417         bsbits = ac->ac_sb->s_blocksize_bits;
4418
4419         /* first, let's learn the actual file size
4420          * we would have once the current request is allocated */
4421         size = extent_logical_end(sbi, &ac->ac_o_ex);
4422         size = size << bsbits;
4423         if (size < i_size_read(ac->ac_inode))
4424                 size = i_size_read(ac->ac_inode);
4425         orig_size = size;
4426
4427         /* max size of free chunks */
4428         max = 2 << bsbits;
4429
4430 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
4431                 (req <= (size) || max <= (chunk_size))
4432
4433         /* first, try to predict filesize */
4434         /* XXX: should this table be tunable? */
4435         start_off = 0;
4436         if (size <= 16 * 1024) {
4437                 size = 16 * 1024;
4438         } else if (size <= 32 * 1024) {
4439                 size = 32 * 1024;
4440         } else if (size <= 64 * 1024) {
4441                 size = 64 * 1024;
4442         } else if (size <= 128 * 1024) {
4443                 size = 128 * 1024;
4444         } else if (size <= 256 * 1024) {
4445                 size = 256 * 1024;
4446         } else if (size <= 512 * 1024) {
4447                 size = 512 * 1024;
4448         } else if (size <= 1024 * 1024) {
4449                 size = 1024 * 1024;
4450         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4451                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4452                                                 (21 - bsbits)) << 21;
4453                 size = 2 * 1024 * 1024;
4454         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4455                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4456                                                         (22 - bsbits)) << 22;
4457                 size = 4 * 1024 * 1024;
4458         } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4459                                         (8<<20)>>bsbits, max, 8 * 1024)) {
4460                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4461                                                         (23 - bsbits)) << 23;
4462                 size = 8 * 1024 * 1024;
4463         } else {
4464                 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4465                 size      = (loff_t) EXT4_C2B(sbi,
4466                                               ac->ac_o_ex.fe_len) << bsbits;
4467         }
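        /*
         * Example: a predicted size of 100 KiB falls into the 128 KiB
         * bucket above; past 1 MiB the request may additionally be
         * aligned to a 2/4/8 MiB boundary via start_off rather than
         * only being rounded up in size.
         */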
4468         size = size >> bsbits;
4469         start = start_off >> bsbits;
4470
4471         /*
4472          * For tiny groups (smaller than 8MB) the chosen allocation
4473          * alignment may be larger than group size. Make sure the
4474          * alignment does not move allocation to a different group which
4475          * makes mballoc fail assertions later.
4476          */
4477         start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4478                         (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4479
4480         /* avoid unnecessary preallocation that may trigger assertions */
4481         if (start + size > EXT_MAX_BLOCKS)
4482                 size = EXT_MAX_BLOCKS - start;
4483
4484         /* don't cover already allocated blocks in selected range */
4485         if (ar->pleft && start <= ar->lleft) {
4486                 size -= ar->lleft + 1 - start;
4487                 start = ar->lleft + 1;
4488         }
4489         if (ar->pright && start + size - 1 >= ar->lright)
4490                 size -= start + size - ar->lright;
4491
4492         /*
4493          * Trim allocation request for filesystems with artificially small
4494          * groups.
4495          */
4496         if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4497                 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4498
4499         end = start + size;
4500
4501         ext4_mb_pa_adjust_overlap(ac, &start, &end);
4502
4503         size = end - start;
4504
4505         /*
4506          * In this function "start" and "size" are normalized for better
4507          * alignment and length such that we could preallocate more blocks.
4508          * This normalization is done such that the original request of
4509          * ac->ac_o_ex.fe_logical & fe_len always lies within the "start" and
4510          * "size" boundaries.
4511          * (Note fe_len can be relaxed since the FS block allocation API does
4512          * not guarantee the number of contiguous blocks allocated; that
4513          * depends upon the free space left, etc.)
4514          * In case of an inode pa, we later use the allocated blocks
4515          * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4516          * range of goal/best blocks [start, size] to put them at the
4517          * ac_o_ex.fe_logical extent of this inode.
4518          * (See ext4_mb_use_inode_pa() for more details)
4519          */
4520         if (start + size <= ac->ac_o_ex.fe_logical ||
4521                         start > ac->ac_o_ex.fe_logical) {
4522                 ext4_msg(ac->ac_sb, KERN_ERR,
4523                          "start %lu, size %lu, fe_logical %lu",
4524                          (unsigned long) start, (unsigned long) size,
4525                          (unsigned long) ac->ac_o_ex.fe_logical);
4526                 BUG();
4527         }
4528         BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4529
4530         /* now prepare goal request */
4531
4532         /* XXX: is it better to align blocks with respect to logical
4533          * placement or to satisfy a big request as is? */
4534         ac->ac_g_ex.fe_logical = start;
4535         ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4536         ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4537
4538         /* define goal start in order to merge */
4539         if (ar->pright && (ar->lright == (start + size)) &&
4540             ar->pright >= size &&
4541             ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4542                 /* merge to the right */
4543                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4544                                                 &ac->ac_g_ex.fe_group,
4545                                                 &ac->ac_g_ex.fe_start);
4546                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4547         }
4548         if (ar->pleft && (ar->lleft + 1 == start) &&
4549             ar->pleft + 1 < ext4_blocks_count(es)) {
4550                 /* merge to the left */
4551                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4552                                                 &ac->ac_g_ex.fe_group,
4553                                                 &ac->ac_g_ex.fe_start);
4554                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4555         }
4556
4557         mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4558                  orig_size, start);
4559 }
4560
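/*
 * End-to-end sketch of the normalization above (hypothetical numbers,
 * 4K blocks): a 3-block write at logical block 5 into a 20 KiB file
 * predicts a 32 KiB file, so the goal becomes 8 blocks starting at
 * block 0. After PA-overlap trimming, the goal extent
 * [start, start + size) still contains the original extent, as the
 * BUG_ON() above enforces.
 */
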
4561 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4562 {
4563         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4564
4565         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4566                 atomic_inc(&sbi->s_bal_reqs);
4567                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4568                 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4569                         atomic_inc(&sbi->s_bal_success);
4570
4571                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4572                 for (int i = 0; i < EXT4_MB_NUM_CRS; i++)
4573                         atomic_add(ac->ac_cX_found[i],
4574                                    &sbi->s_bal_cX_ex_scanned[i]);
4575
4576                 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4577                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4578                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4579                         atomic_inc(&sbi->s_bal_goals);
4580                 /* did we allocate as much as normalizer originally wanted? */
4581                 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4582                         atomic_inc(&sbi->s_bal_len_goals);
4583
4584                 if (ac->ac_found > sbi->s_mb_max_to_scan)
4585                         atomic_inc(&sbi->s_bal_breaks);
4586         }
4587
4588         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4589                 trace_ext4_mballoc_alloc(ac);
4590         else
4591                 trace_ext4_mballoc_prealloc(ac);
4592 }
4593
4594 /*
4595  * Called on failure; free up any blocks from the inode PA for this
4596  * context.  We don't need this for MB_GROUP_PA because we only change
4597  * pa_free in ext4_mb_release_context(), but on failure, we've already
4598  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4599  */
4600 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4601 {
4602         struct ext4_prealloc_space *pa = ac->ac_pa;
4603         struct ext4_buddy e4b;
4604         int err;
4605
4606         if (pa == NULL) {
4607                 if (ac->ac_f_ex.fe_len == 0)
4608                         return;
4609                 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4610                 if (WARN_RATELIMIT(err,
4611                                    "ext4: mb_load_buddy failed (%d)", err))
4612                         /*
4613                          * This should never happen since we pin the
4614                          * pages in the ext4_allocation_context so
4615                          * ext4_mb_load_buddy() should never fail.
4616                          */
4617                         return;
4618                 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4619                 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4620                                ac->ac_f_ex.fe_len);
4621                 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4622                 ext4_mb_unload_buddy(&e4b);
4623                 return;
4624         }
4625         if (pa->pa_type == MB_INODE_PA) {
4626                 spin_lock(&pa->pa_lock);
4627                 pa->pa_free += ac->ac_b_ex.fe_len;
4628                 spin_unlock(&pa->pa_lock);
4629         }
4630 }
4631
4632 /*
4633  * use blocks preallocated to inode
4634  */
4635 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4636                                 struct ext4_prealloc_space *pa)
4637 {
4638         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4639         ext4_fsblk_t start;
4640         ext4_fsblk_t end;
4641         int len;
4642
4643         /* found preallocated blocks, use them */
4644         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4645         end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4646                   start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4647         len = EXT4_NUM_B2C(sbi, end - start);
4648         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4649                                         &ac->ac_b_ex.fe_start);
4650         ac->ac_b_ex.fe_len = len;
4651         ac->ac_status = AC_STATUS_FOUND;
4652         ac->ac_pa = pa;
4653
4654         BUG_ON(start < pa->pa_pstart);
4655         BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4656         BUG_ON(pa->pa_free < len);
4657         BUG_ON(ac->ac_b_ex.fe_len <= 0);
4658         pa->pa_free -= len;
4659
4660         mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4661 }
4662
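/*
 * Worked example (hypothetical PA, cluster size == block size):
 * pa_lstart = 100, pa_pstart = 5000, pa_len = 16, and a request for 4
 * blocks at logical block 110. Then start = 5000 + (110 - 100) = 5010
 * and end = min(5016, 5014) = 5014, so the best extent becomes 4
 * clusters at physical 5010 and pa_free drops by 4. Keeping the
 * logical offset into the PA equal to the physical offset is what
 * keeps the file contiguous on disk.
 */
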
4663 /*
4664  * use blocks preallocated to locality group
4665  */
4666 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4667                                 struct ext4_prealloc_space *pa)
4668 {
4669         unsigned int len = ac->ac_o_ex.fe_len;
4670
4671         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4672                                         &ac->ac_b_ex.fe_group,
4673                                         &ac->ac_b_ex.fe_start);
4674         ac->ac_b_ex.fe_len = len;
4675         ac->ac_status = AC_STATUS_FOUND;
4676         ac->ac_pa = pa;
4677
4678         /* we don't correct pa_pstart or pa_len here to avoid
4679          * possible race when the group is being loaded concurrently
4680          * instead we correct pa later, after blocks are marked
4681          * in on-disk bitmap -- see ext4_mb_release_context()
4682          * Other CPUs are prevented from allocating from this pa by lg_mutex
4683          */
4684         mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4685                  pa->pa_lstart, len, pa);
4686 }
4687
4688 /*
4689  * Return the prealloc space that has the minimal distance
4690  * from the goal block. @cpa is the prealloc
4691  * space with the currently known minimal distance
4692  * from the goal block.
4693  */
4694 static struct ext4_prealloc_space *
4695 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4696                         struct ext4_prealloc_space *pa,
4697                         struct ext4_prealloc_space *cpa)
4698 {
4699         ext4_fsblk_t cur_distance, new_distance;
4700
4701         if (cpa == NULL) {
4702                 atomic_inc(&pa->pa_count);
4703                 return pa;
4704         }
4705         cur_distance = abs(goal_block - cpa->pa_pstart);
4706         new_distance = abs(goal_block - pa->pa_pstart);
4707
4708         if (cur_distance <= new_distance)
4709                 return cpa;
4710
4711         /* drop the previous reference */
4712         atomic_dec(&cpa->pa_count);
4713         atomic_inc(&pa->pa_count);
4714         return pa;
4715 }
4716
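/*
 * Example (hypothetical block numbers): with goal_block = 1000, a
 * current best cpa starting at 1100 (distance 100) loses to a pa
 * starting at 950 (distance 50), and the held reference moves from cpa
 * to pa. On a tie the incumbent cpa is kept.
 */
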
4717 /*
4718  * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4719  */
4720 static bool
4721 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4722                       struct ext4_prealloc_space *pa)
4723 {
4724         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4725         ext4_fsblk_t start;
4726
4727         if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4728                 return true;
4729
4730         /*
4731          * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4732          * in ext4_mb_normalize_request and will stay the same as ac_o_ex
4733          * from ext4_mb_initialize_context. Choose ac_g_ex here to stay
4734          * consistent with ext4_mb_find_by_goal.
4735          */
4736         start = pa->pa_pstart +
4737                 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4738         if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4739                 return false;
4740
4741         if (ac->ac_g_ex.fe_len > pa->pa_len -
4742             EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4743                 return false;
4744
4745         return true;
4746 }
4747
4748 /*
4749  * search goal blocks in preallocated space
4750  */
4751 static noinline_for_stack bool
4752 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4753 {
4754         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4755         int order, i;
4756         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4757         struct ext4_locality_group *lg;
4758         struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4759         struct rb_node *iter;
4760         ext4_fsblk_t goal_block;
4761
4762         /* only data can be preallocated */
4763         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4764                 return false;
4765
4766         /*
4767          * first, try per-file preallocation by searching the inode pa rbtree.
4768          *
4769          * Here, we can't do a direct traversal of the tree because
4770          * ext4_mb_discard_group_preallocation() can concurrently mark the pa
4771          * deleted, and that can cause a direct traversal to skip some entries.
4772          */
4773         read_lock(&ei->i_prealloc_lock);
4774
4775         if (RB_EMPTY_ROOT(&ei->i_prealloc_node))
4776                 goto try_group_pa;
4778
4779         /*
4780          * Step 1: Find a pa with logical start immediately adjacent to the
4781          * original logical start. This could be on the left or right.
4782          *
4783          * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4784          */
4785         for (iter = ei->i_prealloc_node.rb_node; iter;
4786              iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4787                                             tmp_pa->pa_lstart, iter)) {
4788                 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4789                                   pa_node.inode_node);
4790         }
4791
4792         /*
4793          * Step 2: The adjacent pa might be to the right of the logical start;
4794          * find the left adjacent pa. After this step we'd have a valid tmp_pa
4795          * whose logical start is to the left of the original request's start.
4796          */
4797         if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4798                 struct rb_node *tmp;
4799                 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4800
4801                 if (tmp) {
4802                         tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4803                                             pa_node.inode_node);
4804                 } else {
4805                         /*
4806                          * If there is no adjacent pa to the left then finding
4807                          * an overlapping pa is not possible, hence stop searching
4808                          * the inode pa tree
4809                          */
4810                         goto try_group_pa;
4811                 }
4812         }
4813
4814         BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4815
4816         /*
4817          * Step 3: If the left adjacent pa is deleted, keep moving left to find
4818          * the first non deleted adjacent pa. After this step we should have a
4819          * valid tmp_pa which is guaranteed to be non deleted.
4820          */
4821         for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4822                 if (!iter) {
4823                         /*
4824                          * no non deleted left adjacent pa, so stop searching
4825                          * inode pa tree
4826                          */
4827                         goto try_group_pa;
4828                 }
4829                 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4830                                   pa_node.inode_node);
4831                 spin_lock(&tmp_pa->pa_lock);
4832                 if (tmp_pa->pa_deleted == 0) {
4833                         /*
4834                          * We will keep holding the pa_lock from
4835                          * this point on because we don't want group discard
4836                          * to delete this pa underneath us. Since group
4837                          * discard is anyway an ENOSPC operation, it
4838                          * should be okay for it to wait a few more cycles.
4839                          */
4840                         break;
4841                 } else {
4842                         spin_unlock(&tmp_pa->pa_lock);
4843                 }
4844         }
4845
4846         BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4847         BUG_ON(tmp_pa->pa_deleted == 1);
4848
4849         /*
4850          * Step 4: We now have the non deleted left adjacent pa. Only this
4851          * pa can possibly satisfy the request hence check if it overlaps
4852          * original logical start and stop searching if it doesn't.
4853          */
4854         if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4855                 spin_unlock(&tmp_pa->pa_lock);
4856                 goto try_group_pa;
4857         }
4858
4859         /* non-extent files can't have physical blocks past 2^32 */
4860         if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4861             (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4862              EXT4_MAX_BLOCK_FILE_PHYS)) {
4863                 /*
4864                  * Since PAs don't overlap, we won't find any other PA to
4865                  * satisfy this.
4866                  */
4867                 spin_unlock(&tmp_pa->pa_lock);
4868                 goto try_group_pa;
4869         }
4870
4871         if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4872                 atomic_inc(&tmp_pa->pa_count);
4873                 ext4_mb_use_inode_pa(ac, tmp_pa);
4874                 spin_unlock(&tmp_pa->pa_lock);
4875                 read_unlock(&ei->i_prealloc_lock);
4876                 return true;
4877         } else {
4878                 /*
4879                  * We found a valid overlapping pa but couldn't use it because
4880                  * it had no free blocks. This should ideally never happen
4881                  * because:
4882                  *
4883                  * 1. When a new inode pa is added to rbtree it must have
4884                  *    pa_free > 0 since otherwise we won't actually need
4885                  *    preallocation.
4886                  *
4887                  * 2. An inode pa that is in the rbtree can only have its
4888                  *    pa_free become zero when another thread calls:
4889                  *      ext4_mb_new_blocks
4890                  *       ext4_mb_use_preallocated
4891                  *        ext4_mb_use_inode_pa
4892                  *
4893                  * 3. Further, after the above calls make pa_free == 0, we will
4894                  *    immediately remove it from the rbtree in:
4895                  *      ext4_mb_new_blocks
4896                  *       ext4_mb_release_context
4897                  *        ext4_mb_put_pa
4898                  *
4899                  * 4. Since pa_free becoming 0 and the pa getting removed
4900                  * from the tree both happen in ext4_mb_new_blocks, which is
4901                  * always called with i_data_sem held for data allocations, we
4902                  * can be sure that another process will never see a pa in the
4903                  * rbtree with pa_free == 0.
4904                  */
4905                 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4906         }
4907         spin_unlock(&tmp_pa->pa_lock);
4908 try_group_pa:
4909         read_unlock(&ei->i_prealloc_lock);
4910
4911         /* can we use group allocation? */
4912         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4913                 return false;
4914
4915         /* inode may have no locality group for some reason */
4916         lg = ac->ac_lg;
4917         if (lg == NULL)
4918                 return false;
4919         order  = fls(ac->ac_o_ex.fe_len) - 1;
4920         if (order > PREALLOC_TB_SIZE - 1)
4921                 /* The max size of hash table is PREALLOC_TB_SIZE */
4922                 order = PREALLOC_TB_SIZE - 1;
4923
4924         goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4925         /*
4926          * search for the prealloc space with the minimal
4927          * distance from the goal block.
4928          */
4929         for (i = order; i < PREALLOC_TB_SIZE; i++) {
4930                 rcu_read_lock();
4931                 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4932                                         pa_node.lg_list) {
4933                         spin_lock(&tmp_pa->pa_lock);
4934                         if (tmp_pa->pa_deleted == 0 &&
4935                                         tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4936
4937                                 cpa = ext4_mb_check_group_pa(goal_block,
4938                                                                 tmp_pa, cpa);
4939                         }
4940                         spin_unlock(&tmp_pa->pa_lock);
4941                 }
4942                 rcu_read_unlock();
4943         }
4944         if (cpa) {
4945                 ext4_mb_use_group_pa(ac, cpa);
4946                 return true;
4947         }
4948         return false;
4949 }
4950
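/*
 * Example (illustrative): for a 24-block request, order = fls(24) - 1
 * = 4, so the group-PA scan above starts at lg_prealloc_list[4]
 * (roughly, PAs of 16..31 blocks) and walks up to PREALLOC_TB_SIZE - 1,
 * keeping the PA with enough free clusters that lies closest to the
 * goal block.
 */
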
4951 /*
4952  * the function goes through all preallocations in this group and marks them
4953  * used in the in-core bitmap. The buddy must be generated from this bitmap.
4954  * Needs to be called with the ext4 group lock held.
4955  */
4956 static noinline_for_stack
4957 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4958                                         ext4_group_t group)
4959 {
4960         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4961         struct ext4_prealloc_space *pa;
4962         struct list_head *cur;
4963         ext4_group_t groupnr;
4964         ext4_grpblk_t start;
4965         int preallocated = 0;
4966         int len;
4967
4968         if (!grp)
4969                 return;
4970
4971         /* all forms of preallocation discard first load the group,
4972          * so the only competing code is preallocation use.
4973          * we don't need any locking here.
4974          * notice we do NOT ignore preallocations with pa_deleted set;
4975          * otherwise we could leave used blocks available for
4976          * allocation in the buddy when a concurrent ext4_mb_put_pa()
4977          * is dropping the preallocation
4978          */
4979         list_for_each(cur, &grp->bb_prealloc_list) {
4980                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4981                 spin_lock(&pa->pa_lock);
4982                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4983                                              &groupnr, &start);
4984                 len = pa->pa_len;
4985                 spin_unlock(&pa->pa_lock);
4986                 if (unlikely(len == 0))
4987                         continue;
4988                 BUG_ON(groupnr != group);
4989                 mb_set_bits(bitmap, start, len);
4990                 preallocated += len;
4991         }
4992         mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4993 }
4994
4995 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4996                                     struct ext4_prealloc_space *pa)
4997 {
4998         struct ext4_inode_info *ei;
4999
5000         if (pa->pa_deleted) {
5001                 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5002                              pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5003                              pa->pa_len);
5004                 return;
5005         }
5006
5007         pa->pa_deleted = 1;
5008
5009         if (pa->pa_type == MB_INODE_PA) {
5010                 ei = EXT4_I(pa->pa_inode);
5011                 atomic_dec(&ei->i_prealloc_active);
5012         }
5013 }
5014
5015 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5016 {
5017         BUG_ON(!pa);
5018         BUG_ON(atomic_read(&pa->pa_count));
5019         BUG_ON(pa->pa_deleted == 0);
5020         kmem_cache_free(ext4_pspace_cachep, pa);
5021 }
5022
5023 static void ext4_mb_pa_callback(struct rcu_head *head)
5024 {
5025         struct ext4_prealloc_space *pa;
5026
5027         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5028         ext4_mb_pa_free(pa);
5029 }
5030
5031 /*
5032  * drops a reference to the preallocated space descriptor;
5033  * if this was the last reference and the space is consumed, free it
5034  */
5035 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5036                         struct super_block *sb, struct ext4_prealloc_space *pa)
5037 {
5038         ext4_group_t grp;
5039         ext4_fsblk_t grp_blk;
5040         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5041
5042         /* in this short window concurrent discard can set pa_deleted */
5043         spin_lock(&pa->pa_lock);
5044         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5045                 spin_unlock(&pa->pa_lock);
5046                 return;
5047         }
5048
5049         if (pa->pa_deleted == 1) {
5050                 spin_unlock(&pa->pa_lock);
5051                 return;
5052         }
5053
5054         ext4_mb_mark_pa_deleted(sb, pa);
5055         spin_unlock(&pa->pa_lock);
5056
5057         grp_blk = pa->pa_pstart;
5058         /*
5059          * If doing group-based preallocation, pa_pstart may be in the
5060          * next group when pa is used up
5061          */
5062         if (pa->pa_type == MB_GROUP_PA)
5063                 grp_blk--;
5064
5065         grp = ext4_get_group_number(sb, grp_blk);
5066
5067         /*
5068          * possible race:
5069          *
5070          *  P1 (buddy init)                     P2 (regular allocation)
5071          *                                      find block B in PA
5072          *  copy on-disk bitmap to buddy
5073          *                                      mark B in on-disk bitmap
5074          *                                      drop PA from group
5075          *  mark all PAs in buddy
5076          *
5077          * thus, P1 initializes buddy with B available. to prevent this
5078          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5079          * against that pair
5080          */
5081         ext4_lock_group(sb, grp);
5082         list_del(&pa->pa_group_list);
5083         ext4_unlock_group(sb, grp);
5084
5085         if (pa->pa_type == MB_INODE_PA) {
5086                 write_lock(pa->pa_node_lock.inode_lock);
5087                 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5088                 write_unlock(pa->pa_node_lock.inode_lock);
5089                 ext4_mb_pa_free(pa);
5090         } else {
5091                 spin_lock(pa->pa_node_lock.lg_lock);
5092                 list_del_rcu(&pa->pa_node.lg_list);
5093                 spin_unlock(pa->pa_node_lock.lg_lock);
5094                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5095         }
5096 }
5097
5098 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5099 {
5100         struct rb_node **iter = &root->rb_node, *parent = NULL;
5101         struct ext4_prealloc_space *iter_pa, *new_pa;
5102         ext4_lblk_t iter_start, new_start;
5103
5104         while (*iter) {
5105                 iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5106                                    pa_node.inode_node);
5107                 new_pa = rb_entry(new, struct ext4_prealloc_space,
5108                                    pa_node.inode_node);
5109                 iter_start = iter_pa->pa_lstart;
5110                 new_start = new_pa->pa_lstart;
5111
5112                 parent = *iter;
5113                 if (new_start < iter_start)
5114                         iter = &((*iter)->rb_left);
5115                 else
5116                         iter = &((*iter)->rb_right);
5117         }
5118
5119         rb_link_node(new, parent, iter);
5120         rb_insert_color(new, root);
5121 }
5122
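/*
 * Usage sketch (mirroring ext4_mb_new_inode_pa() below): insertion must
 * happen with the inode's PA rbtree lock held for writing, and equal
 * pa_lstart keys deliberately descend to the right so duplicate starts
 * are allowed:
 *
 *	write_lock(&ei->i_prealloc_lock);
 *	ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
 *	write_unlock(&ei->i_prealloc_lock);
 */
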
5123 /*
5124  * creates a new preallocated space for the given inode
5125  */
5126 static noinline_for_stack void
5127 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5128 {
5129         struct super_block *sb = ac->ac_sb;
5130         struct ext4_sb_info *sbi = EXT4_SB(sb);
5131         struct ext4_prealloc_space *pa;
5132         struct ext4_group_info *grp;
5133         struct ext4_inode_info *ei;
5134
5135         /* preallocate only when the found space is larger than requested */
5136         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5137         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5138         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5139         BUG_ON(ac->ac_pa == NULL);
5140
5141         pa = ac->ac_pa;
5142
5143         if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5144                 struct ext4_free_extent ex = {
5145                         .fe_logical = ac->ac_g_ex.fe_logical,
5146                         .fe_len = ac->ac_orig_goal_len,
5147                 };
5148                 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5149
5150                 /* we can't allocate as much as the normalizer wants,
5151                  * so the found space must get a proper lstart
5152                  * to cover the original request */
5153                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5154                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5155
5156                 /*
5157                  * Use the below logic for adjusting best extent as it keeps
5158                  * fragmentation in check while ensuring logical range of best
5159                  * extent doesn't overflow out of goal extent:
5160                  *
5161                  * 1. Check if best ex can be kept at end of goal (before
5162                  *    cr_best_avail trimmed it) and still cover original start
5163                  * 2. Else, check if best ex can be kept at start of goal and
5164                  *    still cover original start
5165                  * 3. Else, keep the best ex at start of original request.
5166                  */
5167                 ex.fe_len = ac->ac_b_ex.fe_len;
5168
5169                 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5170                 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5171                         goto adjust_bex;
5172
5173                 ex.fe_logical = ac->ac_g_ex.fe_logical;
5174                 if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
5175                         goto adjust_bex;
5176
5177                 ex.fe_logical = ac->ac_o_ex.fe_logical;
5178 adjust_bex:
5179                 ac->ac_b_ex.fe_logical = ex.fe_logical;
5180
5181                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5182                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
5183                 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5184         }
5185
5186         pa->pa_lstart = ac->ac_b_ex.fe_logical;
5187         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5188         pa->pa_len = ac->ac_b_ex.fe_len;
5189         pa->pa_free = pa->pa_len;
5190         spin_lock_init(&pa->pa_lock);
5191         INIT_LIST_HEAD(&pa->pa_group_list);
5192         pa->pa_deleted = 0;
5193         pa->pa_type = MB_INODE_PA;
5194
5195         mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5196                  pa->pa_len, pa->pa_lstart);
5197         trace_ext4_mb_new_inode_pa(ac, pa);
5198
5199         atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5200         ext4_mb_use_inode_pa(ac, pa);
5201
5202         ei = EXT4_I(ac->ac_inode);
5203         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5204         if (!grp)
5205                 return;
5206
5207         pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5208         pa->pa_inode = ac->ac_inode;
5209
5210         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5211
5212         write_lock(pa->pa_node_lock.inode_lock);
5213         ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5214         write_unlock(pa->pa_node_lock.inode_lock);
5215         atomic_inc(&ei->i_prealloc_active);
5216 }
5217
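/*
 * Worked example for the best-extent adjustment above (hypothetical
 * numbers): with goal [0, 64), original [40, 44) and a best extent of
 * 32 blocks, case 1 places it at [32, 64), i.e. at the end of the goal
 * while still covering block 40. Had the original started at block 20,
 * case 1 would miss it and case 2 would place the extent at [0, 32)
 * instead.
 */
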
5218 /*
5219  * creates a new preallocated space for the locality group the inode belongs to
5220  */
5221 static noinline_for_stack void
5222 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5223 {
5224         struct super_block *sb = ac->ac_sb;
5225         struct ext4_locality_group *lg;
5226         struct ext4_prealloc_space *pa;
5227         struct ext4_group_info *grp;
5228
5229         /* preallocate only when the found space is larger than requested */
5230         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5231         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5232         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5233         BUG_ON(ac->ac_pa == NULL);
5234
5235         pa = ac->ac_pa;
5236
5237         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5238         pa->pa_lstart = pa->pa_pstart;
5239         pa->pa_len = ac->ac_b_ex.fe_len;
5240         pa->pa_free = pa->pa_len;
5241         spin_lock_init(&pa->pa_lock);
5242         INIT_LIST_HEAD(&pa->pa_node.lg_list);
5243         INIT_LIST_HEAD(&pa->pa_group_list);
5244         pa->pa_deleted = 0;
5245         pa->pa_type = MB_GROUP_PA;
5246
5247         mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5248                  pa->pa_len, pa->pa_lstart);
5249         trace_ext4_mb_new_group_pa(ac, pa);
5250
5251         ext4_mb_use_group_pa(ac, pa);
5252         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5253
5254         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5255         if (!grp)
5256                 return;
5257         lg = ac->ac_lg;
5258         BUG_ON(lg == NULL);
5259
5260         pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5261         pa->pa_inode = NULL;
5262
5263         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5264
5265         /*
5266          * We will later add the new pa to the right bucket
5267          * after updating the pa_free in ext4_mb_release_context
5268          */
5269 }
5270
5271 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5272 {
5273         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5274                 ext4_mb_new_group_pa(ac);
5275         else
5276                 ext4_mb_new_inode_pa(ac);
5277 }
5278
5279 /*
5280  * finds all unused blocks in the on-disk bitmap and frees them in the
5281  * in-core bitmap and buddy.
5282  * @pa must be unlinked from inode and group lists, so that
5283  * nobody else can find/use it.
5284  * the caller MUST hold group/inode locks.
5285  * TODO: optimize the case when there are no in-core structures yet
5286  */
5287 static noinline_for_stack int
5288 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
5289                         struct ext4_prealloc_space *pa)
5290 {
5291         struct super_block *sb = e4b->bd_sb;
5292         struct ext4_sb_info *sbi = EXT4_SB(sb);
5293         unsigned int end;
5294         unsigned int next;
5295         ext4_group_t group;
5296         ext4_grpblk_t bit;
5297         unsigned long long grp_blk_start;
5298         int free = 0;
5299
5300         BUG_ON(pa->pa_deleted == 0);
5301         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5302         grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5303         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5304         end = bit + pa->pa_len;
5305
5306         while (bit < end) {
5307                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5308                 if (bit >= end)
5309                         break;
5310                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5311                 mb_debug(sb, "free preallocated %u/%u in group %u\n",
5312                          (unsigned) ext4_group_first_block_no(sb, group) + bit,
5313                          (unsigned) next - bit, (unsigned) group);
5314                 free += next - bit;
5315
5316                 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5317                 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5318                                                     EXT4_C2B(sbi, bit)),
5319                                                next - bit);
5320                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5321                 bit = next + 1;
5322         }
5323         if (free != pa->pa_free) {
5324                 ext4_msg(e4b->bd_sb, KERN_CRIT,
5325                          "pa %p: logic %lu, phys. %lu, len %d",
5326                          pa, (unsigned long) pa->pa_lstart,
5327                          (unsigned long) pa->pa_pstart,
5328                          pa->pa_len);
5329                 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5330                                         free, pa->pa_free);
5331                 /*
5332                  * pa is already deleted so we use the value obtained
5333                  * from the bitmap and continue.
5334                  */
5335         }
5336         atomic_add(free, &sbi->s_mb_discarded);
5337
5338         return 0;
5339 }
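
/*
 * Run-walk example for the loop above (illustrative only, not taken
 * from a call site): for a pa covering bits 0..7 of the on-disk bitmap
 * with bits 2, 3 and 6 still in use, the zero-bit/set-bit cursor pair
 * finds the free runs [0,2), [4,6) and [7,8); each run is handed to
 * mb_free_blocks() in a single call and "free" accumulates 2+2+1 = 5
 * for the cross-check against pa_free.
 */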
5340
5341 static noinline_for_stack int
5342 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
5343                                 struct ext4_prealloc_space *pa)
5344 {
5345         struct super_block *sb = e4b->bd_sb;
5346         ext4_group_t group;
5347         ext4_grpblk_t bit;
5348
5349         trace_ext4_mb_release_group_pa(sb, pa);
5350         BUG_ON(pa->pa_deleted == 0);
5351         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5352         if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5353                 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5354                              e4b->bd_group, group, pa->pa_pstart);
5355                 return 0;
5356         }
5357         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5358         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5359         trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
5360
5361         return 0;
5362 }
5363
5364 /*
5365  * releases all preallocations in given group
5366  *
5367  * first, we need to decide discard policy:
5368  * - when do we discard
5369  *   1) ENOSPC
5370  * - how many do we discard
5371  *   1) how many requested
5372  */
5373 static noinline_for_stack int
5374 ext4_mb_discard_group_preallocations(struct super_block *sb,
5375                                      ext4_group_t group, int *busy)
5376 {
5377         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5378         struct buffer_head *bitmap_bh = NULL;
5379         struct ext4_prealloc_space *pa, *tmp;
5380         LIST_HEAD(list);
5381         struct ext4_buddy e4b;
5382         struct ext4_inode_info *ei;
5383         int err;
5384         int free = 0;
5385
5386         if (!grp)
5387                 return 0;
5388         mb_debug(sb, "discard preallocation for group %u\n", group);
5389         if (list_empty(&grp->bb_prealloc_list))
5390                 goto out_dbg;
5391
5392         bitmap_bh = ext4_read_block_bitmap(sb, group);
5393         if (IS_ERR(bitmap_bh)) {
5394                 err = PTR_ERR(bitmap_bh);
5395                 ext4_error_err(sb, -err,
5396                                "Error %d reading block bitmap for %u",
5397                                err, group);
5398                 goto out_dbg;
5399         }
5400
5401         err = ext4_mb_load_buddy(sb, group, &e4b);
5402         if (err) {
5403                 ext4_warning(sb, "Error %d loading buddy information for %u",
5404                              err, group);
5405                 put_bh(bitmap_bh);
5406                 goto out_dbg;
5407         }
5408
5409         ext4_lock_group(sb, group);
5410         list_for_each_entry_safe(pa, tmp,
5411                                 &grp->bb_prealloc_list, pa_group_list) {
5412                 spin_lock(&pa->pa_lock);
5413                 if (atomic_read(&pa->pa_count)) {
5414                         spin_unlock(&pa->pa_lock);
5415                         *busy = 1;
5416                         continue;
5417                 }
5418                 if (pa->pa_deleted) {
5419                         spin_unlock(&pa->pa_lock);
5420                         continue;
5421                 }
5422
5423                 /* seems this one can be freed ... */
5424                 ext4_mb_mark_pa_deleted(sb, pa);
5425
5426                 if (!free)
5427                         this_cpu_inc(discard_pa_seq);
5428
5429                 /* we can trust pa_free ... */
5430                 free += pa->pa_free;
5431
5432                 spin_unlock(&pa->pa_lock);
5433
5434                 list_del(&pa->pa_group_list);
5435                 list_add(&pa->u.pa_tmp_list, &list);
5436         }
5437
5438         /* now free all selected PAs */
5439         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5441                 /* remove from object (inode or locality group) */
5442                 if (pa->pa_type == MB_GROUP_PA) {
5443                         spin_lock(pa->pa_node_lock.lg_lock);
5444                         list_del_rcu(&pa->pa_node.lg_list);
5445                         spin_unlock(pa->pa_node_lock.lg_lock);
5446                 } else {
5447                         write_lock(pa->pa_node_lock.inode_lock);
5448                         ei = EXT4_I(pa->pa_inode);
5449                         rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5450                         write_unlock(pa->pa_node_lock.inode_lock);
5451                 }
5452
5453                 list_del(&pa->u.pa_tmp_list);
5454
5455                 if (pa->pa_type == MB_GROUP_PA) {
5456                         ext4_mb_release_group_pa(&e4b, pa);
5457                         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5458                 } else {
5459                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5460                         ext4_mb_pa_free(pa);
5461                 }
5462         }
5463
5464         ext4_unlock_group(sb, group);
5465         ext4_mb_unload_buddy(&e4b);
5466         put_bh(bitmap_bh);
5467 out_dbg:
5468         mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5469                  free, group, grp->bb_free);
5470         return free;
5471 }
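
/*
 * The function above uses a two-phase, collect-then-release shape that
 * recurs throughout this file: pass one marks each victim pa_deleted
 * under pa_lock and moves it to a private list; pass two unlinks it
 * from its owner (inode rbtree or locality group list) and returns its
 * blocks. A minimal sketch of the pattern (the names item, src_lock,
 * can_release() and release() are hypothetical; this is not part of
 * the build):
 *
 *	LIST_HEAD(batch);
 *
 *	spin_lock(&src_lock);
 *	list_for_each_entry_safe(item, tmp, &src_list, node)
 *		if (can_release(item))
 *			list_move(&item->node, &batch);
 *	spin_unlock(&src_lock);
 *
 *	list_for_each_entry_safe(item, tmp, &batch, node)
 *		release(item);
 */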
5472
5473 /*
5474  * releases all non-used preallocated blocks for given inode
5475  *
5476  * It's important to discard preallocations under i_data_sem
5477  * We don't want another block to be served from the prealloc
5478  * space when we are discarding the inode prealloc space.
5479  *
5480  * FIXME!! Make sure it is valid at all the call sites
5481  */
5482 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
5483 {
5484         struct ext4_inode_info *ei = EXT4_I(inode);
5485         struct super_block *sb = inode->i_sb;
5486         struct buffer_head *bitmap_bh = NULL;
5487         struct ext4_prealloc_space *pa, *tmp;
5488         ext4_group_t group = 0;
5489         LIST_HEAD(list);
5490         struct ext4_buddy e4b;
5491         struct rb_node *iter;
5492         int err;
5493
5494         if (!S_ISREG(inode->i_mode))
5495                 return;
5497
5498         if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5499                 return;
5500
5501         mb_debug(sb, "discard preallocation for inode %lu\n",
5502                  inode->i_ino);
5503         trace_ext4_discard_preallocations(inode,
5504                         atomic_read(&ei->i_prealloc_active), needed);
5505
5506         if (needed == 0)
5507                 needed = UINT_MAX;
5508
5509 repeat:
5510         /* first, collect all pa's in the inode */
5511         write_lock(&ei->i_prealloc_lock);
5512         for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
5513              iter = rb_next(iter)) {
5514                 pa = rb_entry(iter, struct ext4_prealloc_space,
5515                               pa_node.inode_node);
5516                 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5517
5518                 spin_lock(&pa->pa_lock);
5519                 if (atomic_read(&pa->pa_count)) {
5520                         /* this shouldn't happen often - nobody should
5521                          * use preallocation while we're discarding it */
5522                         spin_unlock(&pa->pa_lock);
5523                         write_unlock(&ei->i_prealloc_lock);
5524                         ext4_msg(sb, KERN_ERR,
5525                                  "uh-oh! used pa while discarding");
5526                         WARN_ON(1);
5527                         schedule_timeout_uninterruptible(HZ);
5528                         goto repeat;
5529
5530                 }
5531                 if (pa->pa_deleted == 0) {
5532                         ext4_mb_mark_pa_deleted(sb, pa);
5533                         spin_unlock(&pa->pa_lock);
5534                         rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5535                         list_add(&pa->u.pa_tmp_list, &list);
5536                         needed--;
5537                         continue;
5538                 }
5539
5540                 /* someone is deleting pa right now */
5541                 spin_unlock(&pa->pa_lock);
5542                 write_unlock(&ei->i_prealloc_lock);
5543
5544                 /* we have to wait here because pa_deleted
5545                  * doesn't mean the pa is already unlinked from
5546                  * the rbtree. as we might be called from
5547                  * ->clear_inode(), the inode will get freed
5548                  * and a concurrent thread which is unlinking
5549                  * the pa from the inode's tree may access
5550                  * already freed memory, bad-bad-bad */
5551
5552                 /* XXX: if this happens too often, we can
5553                  * add a flag to force wait only in case
5554                  * of ->clear_inode(), but not in case of
5555                  * regular truncate */
5556                 schedule_timeout_uninterruptible(HZ);
5557                 goto repeat;
5558         }
5559         write_unlock(&ei->i_prealloc_lock);
5560
5561         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5562                 BUG_ON(pa->pa_type != MB_INODE_PA);
5563                 group = ext4_get_group_number(sb, pa->pa_pstart);
5564
5565                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5566                                              GFP_NOFS|__GFP_NOFAIL);
5567                 if (err) {
5568                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5569                                        err, group);
5570                         continue;
5571                 }
5572
5573                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5574                 if (IS_ERR(bitmap_bh)) {
5575                         err = PTR_ERR(bitmap_bh);
5576                         ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5577                                        err, group);
5578                         ext4_mb_unload_buddy(&e4b);
5579                         continue;
5580                 }
5581
5582                 ext4_lock_group(sb, group);
5583                 list_del(&pa->pa_group_list);
5584                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5585                 ext4_unlock_group(sb, group);
5586
5587                 ext4_mb_unload_buddy(&e4b);
5588                 put_bh(bitmap_bh);
5589
5590                 list_del(&pa->u.pa_tmp_list);
5591                 ext4_mb_pa_free(pa);
5592         }
5593 }
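
/*
 * Typical call shape (an illustrative sketch, not lifted from a real
 * call site): since needed == 0 is rewritten to UINT_MAX above, passing
 * 0 asks for every inode pa to be dropped:
 *
 *	down_write(&EXT4_I(inode)->i_data_sem);
 *	ext4_discard_preallocations(inode, 0);
 *	up_write(&EXT4_I(inode)->i_data_sem);
 *
 * Holding i_data_sem, as the comment above requires, prevents another
 * block from being served out of a pa while it is being discarded.
 */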
5594
5595 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5596 {
5597         struct ext4_prealloc_space *pa;
5598
5599         BUG_ON(ext4_pspace_cachep == NULL);
5600         pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5601         if (!pa)
5602                 return -ENOMEM;
5603         atomic_set(&pa->pa_count, 1);
5604         ac->ac_pa = pa;
5605         return 0;
5606 }
5607
5608 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5609 {
5610         struct ext4_prealloc_space *pa = ac->ac_pa;
5611
5612         BUG_ON(!pa);
5613         ac->ac_pa = NULL;
5614         WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5615         /*
5616          * The current function is only called due to an error or because the
5617          * len of found blocks < len of requested blocks, hence the PA has not
5618          * been added to grp->bb_prealloc_list. So we don't need to lock it.
5619          */
5620         pa->pa_deleted = 1;
5621         ext4_mb_pa_free(pa);
5622 }
5623
5624 #ifdef CONFIG_EXT4_DEBUG
5625 static inline void ext4_mb_show_pa(struct super_block *sb)
5626 {
5627         ext4_group_t i, ngroups;
5628
5629         if (ext4_forced_shutdown(sb))
5630                 return;
5631
5632         ngroups = ext4_get_groups_count(sb);
5633         mb_debug(sb, "groups: ");
5634         for (i = 0; i < ngroups; i++) {
5635                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5636                 struct ext4_prealloc_space *pa;
5637                 ext4_grpblk_t start;
5638                 struct list_head *cur;
5639
5640                 if (!grp)
5641                         continue;
5642                 ext4_lock_group(sb, i);
5643                 list_for_each(cur, &grp->bb_prealloc_list) {
5644                         pa = list_entry(cur, struct ext4_prealloc_space,
5645                                         pa_group_list);
5646                         spin_lock(&pa->pa_lock);
5647                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5648                                                      NULL, &start);
5649                         spin_unlock(&pa->pa_lock);
5650                         mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5651                                  pa->pa_len);
5652                 }
5653                 ext4_unlock_group(sb, i);
5654                 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5655                          grp->bb_fragments);
5656         }
5657 }
5658
5659 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5660 {
5661         struct super_block *sb = ac->ac_sb;
5662
5663         if (ext4_forced_shutdown(sb))
5664                 return;
5665
5666         mb_debug(sb, "Can't allocate:"
5667                         " Allocation context details:");
5668         mb_debug(sb, "status %u flags 0x%x",
5669                         ac->ac_status, ac->ac_flags);
5670         mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5671                         "goal %lu/%lu/%lu@%lu, "
5672                         "best %lu/%lu/%lu@%lu cr %d",
5673                         (unsigned long)ac->ac_o_ex.fe_group,
5674                         (unsigned long)ac->ac_o_ex.fe_start,
5675                         (unsigned long)ac->ac_o_ex.fe_len,
5676                         (unsigned long)ac->ac_o_ex.fe_logical,
5677                         (unsigned long)ac->ac_g_ex.fe_group,
5678                         (unsigned long)ac->ac_g_ex.fe_start,
5679                         (unsigned long)ac->ac_g_ex.fe_len,
5680                         (unsigned long)ac->ac_g_ex.fe_logical,
5681                         (unsigned long)ac->ac_b_ex.fe_group,
5682                         (unsigned long)ac->ac_b_ex.fe_start,
5683                         (unsigned long)ac->ac_b_ex.fe_len,
5684                         (unsigned long)ac->ac_b_ex.fe_logical,
5685                         (int)ac->ac_criteria);
5686         mb_debug(sb, "%u found", ac->ac_found);
5687         mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5688         if (ac->ac_pa)
5689                 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5690                          "group pa" : "inode pa");
5691         ext4_mb_show_pa(sb);
5692 }
5693 #else
5694 static inline void ext4_mb_show_pa(struct super_block *sb)
5695 {
5696 }
5697 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5698 {
5699         ext4_mb_show_pa(ac->ac_sb);
5700 }
5701 #endif
5702
5703 /*
5704  * We use locality group preallocation for small files. The size of the
5705  * file is the current size or the resulting size after allocation,
5706  * whichever is larger.
5707  *
5708  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5709  */
5710 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5711 {
5712         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5713         int bsbits = ac->ac_sb->s_blocksize_bits;
5714         loff_t size, isize;
5715         bool inode_pa_eligible, group_pa_eligible;
5716
5717         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5718                 return;
5719
5720         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5721                 return;
5722
5723         group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5724         inode_pa_eligible = true;
5725         size = extent_logical_end(sbi, &ac->ac_o_ex);
5726         isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5727                 >> bsbits;
5728
5729         /* No point in using inode preallocation for closed files */
5730         if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5731             !inode_is_open_for_write(ac->ac_inode))
5732                 inode_pa_eligible = false;
5733
5734         size = max(size, isize);
5735         /* Don't use group allocation for large files */
5736         if (size > sbi->s_mb_stream_request)
5737                 group_pa_eligible = false;
5738
5739         if (!group_pa_eligible) {
5740                 if (inode_pa_eligible)
5741                         ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5742                 else
5743                         ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5744                 return;
5745         }
5746
5747         BUG_ON(ac->ac_lg != NULL);
5748         /*
5749          * locality group prealloc space is per cpu. The reason for having
5750          * a per-cpu locality group is to reduce the contention between block
5751          * requests from multiple CPUs.
5752          */
5753         ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5754
5755         /* we're going to use group allocation */
5756         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5757
5758         /* serialize all allocations in the group */
5759         mutex_lock(&ac->ac_lg->lg_mutex);
5760 }
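
/*
 * Worked example for the policy above (illustrative, assuming the
 * stream threshold is tuned to 16 blocks): a request whose extent ends
 * at logical block 11 against a file whose i_size rounds up to 8 blocks
 * gives max(size, isize) = 11 <= 16, so the request stays group-pa
 * eligible and EXT4_MB_HINT_GROUP_ALLOC is set below; had the file
 * already been 64 blocks long, group pa would be ruled out and the
 * request would take the inode-pa path with EXT4_MB_STREAM_ALLOC
 * instead.
 */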
5761
5762 static noinline_for_stack void
5763 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5764                                 struct ext4_allocation_request *ar)
5765 {
5766         struct super_block *sb = ar->inode->i_sb;
5767         struct ext4_sb_info *sbi = EXT4_SB(sb);
5768         struct ext4_super_block *es = sbi->s_es;
5769         ext4_group_t group;
5770         unsigned int len;
5771         ext4_fsblk_t goal;
5772         ext4_grpblk_t block;
5773
5774         /* we can't allocate > group size */
5775         len = ar->len;
5776
5777         /* just a dirty hack to filter too big requests  */
5778         if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5779                 len = EXT4_CLUSTERS_PER_GROUP(sb);
5780
5781         /* start searching from the goal */
5782         goal = ar->goal;
5783         if (goal < le32_to_cpu(es->s_first_data_block) ||
5784                         goal >= ext4_blocks_count(es))
5785                 goal = le32_to_cpu(es->s_first_data_block);
5786         ext4_get_group_no_and_offset(sb, goal, &group, &block);
5787
5788         /* set up allocation goals */
5789         ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5790         ac->ac_status = AC_STATUS_CONTINUE;
5791         ac->ac_sb = sb;
5792         ac->ac_inode = ar->inode;
5793         ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5794         ac->ac_o_ex.fe_group = group;
5795         ac->ac_o_ex.fe_start = block;
5796         ac->ac_o_ex.fe_len = len;
5797         ac->ac_g_ex = ac->ac_o_ex;
5798         ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5799         ac->ac_flags = ar->flags;
5800
5801         /* we have to define the context: we'll work with a file or a
5802          * locality group. this is a policy, actually */
5803         ext4_mb_group_or_file(ac);
5804
5805         mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5806                         "left: %u/%u, right %u/%u to %swritable\n",
5807                         (unsigned) ar->len, (unsigned) ar->logical,
5808                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5809                         (unsigned) ar->lleft, (unsigned) ar->pleft,
5810                         (unsigned) ar->lright, (unsigned) ar->pright,
5811                         inode_is_open_for_write(ar->inode) ? "" : "non-");
5812 }
5813
5814 static noinline_for_stack void
5815 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5816                                         struct ext4_locality_group *lg,
5817                                         int order, int total_entries)
5818 {
5819         ext4_group_t group = 0;
5820         struct ext4_buddy e4b;
5821         LIST_HEAD(discard_list);
5822         struct ext4_prealloc_space *pa, *tmp;
5823
5824         mb_debug(sb, "discard locality group preallocation\n");
5825
5826         spin_lock(&lg->lg_prealloc_lock);
5827         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5828                                 pa_node.lg_list,
5829                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5830                 spin_lock(&pa->pa_lock);
5831                 if (atomic_read(&pa->pa_count)) {
5832                         /*
5833                          * This is the pa that we just used
5834                          * for block allocation. So don't
5835                          * free that
5836                          */
5837                         spin_unlock(&pa->pa_lock);
5838                         continue;
5839                 }
5840                 if (pa->pa_deleted) {
5841                         spin_unlock(&pa->pa_lock);
5842                         continue;
5843                 }
5844                 /* only lg prealloc space */
5845                 BUG_ON(pa->pa_type != MB_GROUP_PA);
5846
5847                 /* seems this one can be freed ... */
5848                 ext4_mb_mark_pa_deleted(sb, pa);
5849                 spin_unlock(&pa->pa_lock);
5850
5851                 list_del_rcu(&pa->pa_node.lg_list);
5852                 list_add(&pa->u.pa_tmp_list, &discard_list);
5853
5854                 total_entries--;
5855                 if (total_entries <= 5) {
5856                         /*
5857                          * we want to keep only 5 entries,
5858                          * allowing the list to grow to 8. This
5859                          * makes sure we don't call discard
5860                          * again soon for this list.
5861                          */
5862                         break;
5863                 }
5864         }
5865         spin_unlock(&lg->lg_prealloc_lock);
5866
5867         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5868                 int err;
5869
5870                 group = ext4_get_group_number(sb, pa->pa_pstart);
5871                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5872                                              GFP_NOFS|__GFP_NOFAIL);
5873                 if (err) {
5874                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5875                                        err, group);
5876                         continue;
5877                 }
5878                 ext4_lock_group(sb, group);
5879                 list_del(&pa->pa_group_list);
5880                 ext4_mb_release_group_pa(&e4b, pa);
5881                 ext4_unlock_group(sb, group);
5882
5883                 ext4_mb_unload_buddy(&e4b);
5884                 list_del(&pa->u.pa_tmp_list);
5885                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5886         }
5887 }
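
/*
 * Teardown discipline in the loop above (restating the visible code):
 * an entry leaves the RCU-protected lg list under lg_prealloc_lock and
 * its memory is only reclaimed after a grace period, so lockless
 * readers of lg_prealloc_list never touch freed memory:
 *
 *	list_del_rcu(&pa->pa_node.lg_list);
 *	...
 *	call_rcu(&pa->u.pa_rcu, ext4_mb_pa_callback);
 */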
5888
5889 /*
5890  * We have incremented pa_count. So it cannot be freed at this
5891  * point. Also we hold lg_mutex. So no parallel allocation is
5892  * possible from this lg. That means pa_free cannot be updated.
5893  *
5894  * A parallel ext4_mb_discard_group_preallocations is possible,
5895  * which can cause the lg_prealloc_list to be updated.
5896  */
5897
5898 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5899 {
5900         int order, added = 0, lg_prealloc_count = 1;
5901         struct super_block *sb = ac->ac_sb;
5902         struct ext4_locality_group *lg = ac->ac_lg;
5903         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5904
5905         order = fls(pa->pa_free) - 1;
5906         if (order > PREALLOC_TB_SIZE - 1)
5907                 /* The max size of hash table is PREALLOC_TB_SIZE */
5908                 order = PREALLOC_TB_SIZE - 1;
5909         /* Add the prealloc space to lg */
5910         spin_lock(&lg->lg_prealloc_lock);
5911         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5912                                 pa_node.lg_list,
5913                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5914                 spin_lock(&tmp_pa->pa_lock);
5915                 if (tmp_pa->pa_deleted) {
5916                         spin_unlock(&tmp_pa->pa_lock);
5917                         continue;
5918                 }
5919                 if (!added && pa->pa_free < tmp_pa->pa_free) {
5920                         /* Add to the tail of the previous entry */
5921                         list_add_tail_rcu(&pa->pa_node.lg_list,
5922                                                 &tmp_pa->pa_node.lg_list);
5923                         added = 1;
5924                         /*
5925                          * we want to count the total
5926                          * number of entries in the list
5927                          */
5928                 }
5929                 spin_unlock(&tmp_pa->pa_lock);
5930                 lg_prealloc_count++;
5931         }
5932         if (!added)
5933                 list_add_tail_rcu(&pa->pa_node.lg_list,
5934                                         &lg->lg_prealloc_list[order]);
5935         spin_unlock(&lg->lg_prealloc_lock);
5936
5937         /* Now trim the list to be not more than 8 elements */
5938         if (lg_prealloc_count > 8)
5939                 ext4_mb_discard_lg_preallocations(sb, lg,
5940                                                   order, lg_prealloc_count);
5941 }
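
/*
 * Bucket selection above, worked through (illustrative): fls() returns
 * the position of the most significant set bit, so pa_free = 12
 * (binary 1100) gives order = fls(12) - 1 = 3 and the pa lands on
 * lg_prealloc_list[3]; a pa_free of 2^(PREALLOC_TB_SIZE - 1) or more is
 * clamped into the last bucket. Within a bucket the insertion keeps the
 * list sorted by ascending pa_free.
 */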
5942
5943 /*
5944  * release all resource we used in allocation
5945  */
5946 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5947 {
5948         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5949         struct ext4_prealloc_space *pa = ac->ac_pa;
5950         if (pa) {
5951                 if (pa->pa_type == MB_GROUP_PA) {
5952                         /* see comment in ext4_mb_use_group_pa() */
5953                         spin_lock(&pa->pa_lock);
5954                         pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5955                         pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5956                         pa->pa_free -= ac->ac_b_ex.fe_len;
5957                         pa->pa_len -= ac->ac_b_ex.fe_len;
5958                         spin_unlock(&pa->pa_lock);
5959
5960                         /*
5961                          * We want to add the pa to the right bucket.
5962                          * Remove it from the list, and while re-adding
5963                          * make sure the list to which we are adding it
5964                          * doesn't grow too big.
5965                          */
5966                         if (likely(pa->pa_free)) {
5967                                 spin_lock(pa->pa_node_lock.lg_lock);
5968                                 list_del_rcu(&pa->pa_node.lg_list);
5969                                 spin_unlock(pa->pa_node_lock.lg_lock);
5970                                 ext4_mb_add_n_trim(ac);
5971                         }
5972                 }
5973
5974                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5975         }
5976         if (ac->ac_bitmap_page)
5977                 put_page(ac->ac_bitmap_page);
5978         if (ac->ac_buddy_page)
5979                 put_page(ac->ac_buddy_page);
5980         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5981                 mutex_unlock(&ac->ac_lg->lg_mutex);
5982         ext4_mb_collect_stats(ac);
5983         return 0;
5984 }
5985
5986 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5987 {
5988         ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5989         int ret;
5990         int freed = 0, busy = 0;
5991         int retry = 0;
5992
5993         trace_ext4_mb_discard_preallocations(sb, needed);
5994
5995         if (needed == 0)
5996                 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5997  repeat:
5998         for (i = 0; i < ngroups && needed > 0; i++) {
5999                 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
6000                 freed += ret;
6001                 needed -= ret;
6002                 cond_resched();
6003         }
6004
6005         if (needed > 0 && busy && ++retry < 3) {
6006                 busy = 0;
6007                 goto repeat;
6008         }
6009
6010         return freed;
6011 }
6012
6013 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
6014                         struct ext4_allocation_context *ac, u64 *seq)
6015 {
6016         int freed;
6017         u64 seq_retry = 0;
6018         bool ret = false;
6019
6020         freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6021         if (freed) {
6022                 ret = true;
6023                 goto out_dbg;
6024         }
6025         seq_retry = ext4_get_discard_pa_seq_sum();
6026         if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6027                 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
6028                 *seq = seq_retry;
6029                 ret = true;
6030         }
6031
6032 out_dbg:
6033         mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
6034         return ret;
6035 }
6036
6037 /*
6038  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6039  * linearly starting at the goal block and also excludes the blocks which
6040  * are going to be in use after fast commit replay.
6041  */
6042 static ext4_fsblk_t
6043 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6044 {
6045         struct buffer_head *bitmap_bh;
6046         struct super_block *sb = ar->inode->i_sb;
6047         struct ext4_sb_info *sbi = EXT4_SB(sb);
6048         ext4_group_t group, nr;
6049         ext4_grpblk_t blkoff;
6050         ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6051         ext4_grpblk_t i = 0;
6052         ext4_fsblk_t goal, block;
6053         struct ext4_super_block *es = sbi->s_es;
6054
6055         goal = ar->goal;
6056         if (goal < le32_to_cpu(es->s_first_data_block) ||
6057                         goal >= ext4_blocks_count(es))
6058                 goal = le32_to_cpu(es->s_first_data_block);
6059
6060         ar->len = 0;
6061         ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6062         for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6063                 bitmap_bh = ext4_read_block_bitmap(sb, group);
6064                 if (IS_ERR(bitmap_bh)) {
6065                         *errp = PTR_ERR(bitmap_bh);
6066                         pr_warn("Failed to read block bitmap\n");
6067                         return 0;
6068                 }
6069
6070                 while (1) {
6071                         i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6072                                                 blkoff);
6073                         if (i >= max)
6074                                 break;
6075                         if (ext4_fc_replay_check_excluded(sb,
6076                                 ext4_group_first_block_no(sb, group) +
6077                                 EXT4_C2B(sbi, i))) {
6078                                 blkoff = i + 1;
6079                         } else
6080                                 break;
6081                 }
6082                 brelse(bitmap_bh);
6083                 if (i < max)
6084                         break;
6085
6086                 if (++group >= ext4_get_groups_count(sb))
6087                         group = 0;
6088
6089                 blkoff = 0;
6090         }
6091
6092         if (i >= max) {
6093                 *errp = -ENOSPC;
6094                 return 0;
6095         }
6096
6097         block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6098         ext4_mb_mark_bb(sb, block, 1, true);
6099         ar->len = 1;
6100
6101         return block;
6102 }
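
/*
 * The scan above, reduced to its core (a sketch in which is_excluded()
 * is a hypothetical stand-in for the ext4_fc_replay_check_excluded()
 * test; not part of the build):
 *
 *	i = mb_find_next_zero_bit(bitmap, max, blkoff);
 *	while (i < max && is_excluded(i))
 *		i = mb_find_next_zero_bit(bitmap, max, i + 1);
 *
 * i.e. take the first free bit at or after the goal that fast commit
 * replay has not already claimed, moving to the next group (wrapping at
 * the last one) whenever a bitmap is exhausted.
 */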
6103
6104 /*
6105  * Main entry point into mballoc to allocate blocks
6106  * it tries to use preallocation first, then falls back
6107  * to usual allocation
6108  */
6109 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6110                                 struct ext4_allocation_request *ar, int *errp)
6111 {
6112         struct ext4_allocation_context *ac = NULL;
6113         struct ext4_sb_info *sbi;
6114         struct super_block *sb;
6115         ext4_fsblk_t block = 0;
6116         unsigned int inquota = 0;
6117         unsigned int reserv_clstrs = 0;
6118         int retries = 0;
6119         u64 seq;
6120
6121         might_sleep();
6122         sb = ar->inode->i_sb;
6123         sbi = EXT4_SB(sb);
6124
6125         trace_ext4_request_blocks(ar);
6126         if (sbi->s_mount_state & EXT4_FC_REPLAY)
6127                 return ext4_mb_new_blocks_simple(ar, errp);
6128
6129         /* Allow use of the superuser reservation for quota files */
6130         if (ext4_is_quota_file(ar->inode))
6131                 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6132
6133         if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6134                 /* Without delayed allocation we need to verify
6135                  * there are enough free blocks to do block allocation
6136                  * and verify that the allocation doesn't exceed the quota limits.
6137                  */
6138                 while (ar->len &&
6139                         ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6141                         /* let others free the space */
6142                         cond_resched();
6143                         ar->len = ar->len >> 1;
6144                 }
6145                 if (!ar->len) {
6146                         ext4_mb_show_pa(sb);
6147                         *errp = -ENOSPC;
6148                         return 0;
6149                 }
6150                 reserv_clstrs = ar->len;
6151                 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6152                         dquot_alloc_block_nofail(ar->inode,
6153                                                  EXT4_C2B(sbi, ar->len));
6154                 } else {
6155                         while (ar->len &&
6156                                 dquot_alloc_block(ar->inode,
6157                                                   EXT4_C2B(sbi, ar->len))) {
6159                                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6160                                 ar->len--;
6161                         }
6162                 }
6163                 inquota = ar->len;
6164                 if (ar->len == 0) {
6165                         *errp = -EDQUOT;
6166                         goto out;
6167                 }
6168         }
6169
6170         ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6171         if (!ac) {
6172                 ar->len = 0;
6173                 *errp = -ENOMEM;
6174                 goto out;
6175         }
6176
6177         ext4_mb_initialize_context(ac, ar);
6178
6179         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6180         seq = this_cpu_read(discard_pa_seq);
6181         if (!ext4_mb_use_preallocated(ac)) {
6182                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6183                 ext4_mb_normalize_request(ac, ar);
6184
6185                 *errp = ext4_mb_pa_alloc(ac);
6186                 if (*errp)
6187                         goto errout;
6188 repeat:
6189                 /* allocate space in core */
6190                 *errp = ext4_mb_regular_allocator(ac);
6191                 /*
6192                  * The pa allocated above is added to grp->bb_prealloc_list only
6193                  * when we were able to allocate some blocks, i.e. when
6194                  * ac->ac_status == AC_STATUS_FOUND.
6195                  * An error from above means ac->ac_status != AC_STATUS_FOUND,
6196                  * so we have to free this pa here itself.
6197                  */
6198                 if (*errp) {
6199                         ext4_mb_pa_put_free(ac);
6200                         ext4_discard_allocated_blocks(ac);
6201                         goto errout;
6202                 }
6203                 if (ac->ac_status == AC_STATUS_FOUND &&
6204                         ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6205                         ext4_mb_pa_put_free(ac);
6206         }
6207         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6208                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6209                 if (*errp) {
6210                         ext4_discard_allocated_blocks(ac);
6211                         goto errout;
6212                 } else {
6213                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6214                         ar->len = ac->ac_b_ex.fe_len;
6215                 }
6216         } else {
6217                 if (++retries < 3 &&
6218                     ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6219                         goto repeat;
6220                 /*
6221                  * If block allocation fails then the pa allocated above
6222                  * needs to be freed here itself.
6223                  */
6224                 ext4_mb_pa_put_free(ac);
6225                 *errp = -ENOSPC;
6226         }
6227
6228         if (*errp) {
6229 errout:
6230                 ac->ac_b_ex.fe_len = 0;
6231                 ar->len = 0;
6232                 ext4_mb_show_ac(ac);
6233         }
6234         ext4_mb_release_context(ac);
6235         kmem_cache_free(ext4_ac_cachep, ac);
6236 out:
6237         if (inquota && ar->len < inquota)
6238                 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6239         if (!ar->len) {
6240                 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6241                         /* release all the reserved blocks if non delalloc */
6242                         percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6243                                                 reserv_clstrs);
6244         }
6245
6246         trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6247
6248         return block;
6249 }
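
/*
 * Caller's-eye view (an illustrative sketch with error handling
 * trimmed; goal_pblk and map, a struct ext4_map_blocks from the caller,
 * are hypothetical):
 *
 *	struct ext4_allocation_request ar = {
 *		.inode   = inode,
 *		.goal    = goal_pblk,
 *		.logical = map->m_lblk,
 *		.len     = map->m_len,
 *		.flags   = EXT4_MB_HINT_DATA,
 *	};
 *	int err;
 *	ext4_fsblk_t pblk = ext4_mb_new_blocks(handle, &ar, &err);
 *
 * On failure pblk is 0 and err holds -ENOSPC, -EDQUOT etc.; on success
 * ar.len is updated to the length actually allocated, which may be
 * shorter than requested.
 */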
6250
6251 /*
6252  * We can merge two free data extents only if the physical blocks
6253  * are contiguous, AND the extents were freed by the same transaction,
6254  * AND the blocks are associated with the same group.
6255  */
6256 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6257                                         struct ext4_free_data *entry,
6258                                         struct ext4_free_data *new_entry,
6259                                         struct rb_root *entry_rb_root)
6260 {
6261         if ((entry->efd_tid != new_entry->efd_tid) ||
6262             (entry->efd_group != new_entry->efd_group))
6263                 return;
6264         if (entry->efd_start_cluster + entry->efd_count ==
6265             new_entry->efd_start_cluster) {
6266                 new_entry->efd_start_cluster = entry->efd_start_cluster;
6267                 new_entry->efd_count += entry->efd_count;
6268         } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6269                    entry->efd_start_cluster) {
6270                 new_entry->efd_count += entry->efd_count;
6271         } else
6272                 return;
6273         spin_lock(&sbi->s_md_lock);
6274         list_del(&entry->efd_list);
6275         spin_unlock(&sbi->s_md_lock);
6276         rb_erase(&entry->efd_node, entry_rb_root);
6277         kmem_cache_free(ext4_free_data_cachep, entry);
6278 }
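
/*
 * Worked example (illustrative): with entry = {start 0, count 16} and
 * new_entry = {start 16, count 8} in the same group and transaction,
 * 0 + 16 == 16 matches the first branch, so new_entry grows to
 * {start 0, count 24} while entry is unlinked from the list and rbtree
 * and freed. Extents from different transactions, or merely nearby
 * rather than contiguous, are left untouched.
 */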
6279
6280 static noinline_for_stack void
6281 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6282                       struct ext4_free_data *new_entry)
6283 {
6284         ext4_group_t group = e4b->bd_group;
6285         ext4_grpblk_t cluster;
6286         ext4_grpblk_t clusters = new_entry->efd_count;
6287         struct ext4_free_data *entry;
6288         struct ext4_group_info *db = e4b->bd_info;
6289         struct super_block *sb = e4b->bd_sb;
6290         struct ext4_sb_info *sbi = EXT4_SB(sb);
6291         struct rb_node **n = &db->bb_free_root.rb_node, *node;
6292         struct rb_node *parent = NULL, *new_node;
6293
6294         BUG_ON(!ext4_handle_valid(handle));
6295         BUG_ON(e4b->bd_bitmap_page == NULL);
6296         BUG_ON(e4b->bd_buddy_page == NULL);
6297
6298         new_node = &new_entry->efd_node;
6299         cluster = new_entry->efd_start_cluster;
6300
6301         if (!*n) {
6302                 /* first free block extent. We need to
6303                  * protect the buddy cache from being freed,
6304                  * otherwise we'll refresh it from the
6305                  * on-disk bitmap and lose not-yet-available
6306                  * blocks */
6307                 get_page(e4b->bd_buddy_page);
6308                 get_page(e4b->bd_bitmap_page);
6309         }
6310         while (*n) {
6311                 parent = *n;
6312                 entry = rb_entry(parent, struct ext4_free_data, efd_node);
6313                 if (cluster < entry->efd_start_cluster)
6314                         n = &(*n)->rb_left;
6315                 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6316                         n = &(*n)->rb_right;
6317                 else {
6318                         ext4_grp_locked_error(sb, group, 0,
6319                                 ext4_group_first_block_no(sb, group) +
6320                                 EXT4_C2B(sbi, cluster),
6321                                 "Block already on to-be-freed list");
6322                         kmem_cache_free(ext4_free_data_cachep, new_entry);
6323                         return;
6324                 }
6325         }
6326
6327         rb_link_node(new_node, parent, n);
6328         rb_insert_color(new_node, &db->bb_free_root);
6329
6330         /* Now try to see whether the extent can be merged to the left and right */
6331         node = rb_prev(new_node);
6332         if (node) {
6333                 entry = rb_entry(node, struct ext4_free_data, efd_node);
6334                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6335                                             &(db->bb_free_root));
6336         }
6337
6338         node = rb_next(new_node);
6339         if (node) {
6340                 entry = rb_entry(node, struct ext4_free_data, efd_node);
6341                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6342                                             &(db->bb_free_root));
6343         }
6344
6345         spin_lock(&sbi->s_md_lock);
6346         list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
6347         sbi->s_mb_free_pending += clusters;
6348         spin_unlock(&sbi->s_md_lock);
6349 }
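
/*
 * The insertion above is the standard rbtree descent idiom, reduced
 * (node_key() is a hypothetical accessor; sketch only):
 *
 *	while (*n) {
 *		parent = *n;
 *		if (key < node_key(parent))
 *			n = &(*n)->rb_left;
 *		else
 *			n = &(*n)->rb_right;
 *	}
 *	rb_link_node(new_node, parent, n);
 *	rb_insert_color(new_node, root);
 *
 * with the twist that a cluster falling inside an existing extent is
 * not a third branch to descend but a corruption report, since a
 * cluster may only sit on the to-be-freed list once.
 */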
6350
6351 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6352                                         unsigned long count)
6353 {
6354         struct super_block *sb = inode->i_sb;
6355         ext4_group_t group;
6356         ext4_grpblk_t blkoff;
6357
6358         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6359         ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
6360                              EXT4_MB_BITMAP_MARKED_CHECK |
6361                              EXT4_MB_SYNC_UPDATE,
6362                              NULL);
6363 }
6364
6365 /**
6366  * ext4_mb_clear_bb() -- helper function for freeing blocks.
6367  *                      Used by ext4_free_blocks()
6368  * @handle:             handle for this transaction
6369  * @inode:              inode
6370  * @block:              starting physical block to be freed
6371  * @count:              number of blocks to be freed
6372  * @flags:              flags used by ext4_free_blocks
6373  */
6374 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6375                                ext4_fsblk_t block, unsigned long count,
6376                                int flags)
6377 {
6378         struct super_block *sb = inode->i_sb;
6379         struct ext4_group_info *grp;
6380         unsigned int overflow;
6381         ext4_grpblk_t bit;
6382         ext4_group_t block_group;
6383         struct ext4_sb_info *sbi;
6384         struct ext4_buddy e4b;
6385         unsigned int count_clusters;
6386         int err = 0;
6387         int mark_flags = 0;
6388         ext4_grpblk_t changed;
6389
6390         sbi = EXT4_SB(sb);
6391
6392         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6393             !ext4_inode_block_valid(inode, block, count)) {
6394                 ext4_error(sb, "Freeing blocks in system zone - "
6395                            "Block = %llu, count = %lu", block, count);
6396                 /* err = 0. ext4_std_error should be a no op */
6397                 goto error_out;
6398         }
6399         flags |= EXT4_FREE_BLOCKS_VALIDATED;
6400
6401 do_more:
6402         overflow = 0;
6403         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6404
6405         grp = ext4_get_group_info(sb, block_group);
6406         if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6407                 return;
6408
6409         /*
6410          * Check to see if we are freeing blocks across a group
6411          * boundary.
6412          */
6413         if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6414                 overflow = EXT4_C2B(sbi, bit) + count -
6415                         EXT4_BLOCKS_PER_GROUP(sb);
6416                 count -= overflow;
6417                 /* The range changed so it's no longer validated */
6418                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6419         }
6420         count_clusters = EXT4_NUM_B2C(sbi, count);
6421         trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6422
6423         /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6424         err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6425                                      GFP_NOFS|__GFP_NOFAIL);
6426         if (err)
6427                 goto error_out;
6428
6429         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6430             !ext4_inode_block_valid(inode, block, count)) {
6431                 ext4_error(sb, "Freeing blocks in system zone - "
6432                            "Block = %llu, count = %lu", block, count);
6433                 /* err = 0. ext4_std_error should be a no op */
6434                 goto error_clean;
6435         }
6436
6437 #ifdef AGGRESSIVE_CHECK
6438         mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
6439 #endif
6440         err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6441                                    count_clusters, mark_flags, &changed);
6442
6444         if (err && changed == 0)
6445                 goto error_clean;
6446
6447 #ifdef AGGRESSIVE_CHECK
6448         BUG_ON(changed != count_clusters);
6449 #endif
6450
6451         /*
6452          * We need to make sure we don't reuse the freed block until after the
6453          * transaction is committed. We make an exception if the inode is to be
6454          * written in writeback mode since writeback mode has weak data
6455          * consistency guarantees.
6456          */
6457         if (ext4_handle_valid(handle) &&
6458             ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6459              !ext4_should_writeback_data(inode))) {
6460                 struct ext4_free_data *new_entry;
6461                 /*
6462                  * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6463                  * to fail.
6464                  */
6465                 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6466                                 GFP_NOFS|__GFP_NOFAIL);
6467                 new_entry->efd_start_cluster = bit;
6468                 new_entry->efd_group = block_group;
6469                 new_entry->efd_count = count_clusters;
6470                 new_entry->efd_tid = handle->h_transaction->t_tid;
6471
6472                 ext4_lock_group(sb, block_group);
6473                 ext4_mb_free_metadata(handle, &e4b, new_entry);
6474         } else {
6475                 if (test_opt(sb, DISCARD)) {
6476                         err = ext4_issue_discard(sb, block_group, bit,
6477                                                  count_clusters, NULL);
6478                         if (err && err != -EOPNOTSUPP)
6479                                 ext4_msg(sb, KERN_WARNING, "discard request in"
6480                                          " group:%u block:%d count:%lu failed"
6481                                          " with %d", block_group, bit, count,
6482                                          err);
6483                 } else
6484                         EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6485
6486                 ext4_lock_group(sb, block_group);
6487                 mb_free_blocks(inode, &e4b, bit, count_clusters);
6488         }
6489
6490         ext4_unlock_group(sb, block_group);
6491
6492         /*
6493          * on a bigalloc file system, defer the s_freeclusters_counter
6494          * update to the caller (ext4_remove_space and friends) so they
6495          * can determine if a cluster freed here should be rereserved
6496          */
6497         if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6498                 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6499                         dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6500                 percpu_counter_add(&sbi->s_freeclusters_counter,
6501                                    count_clusters);
6502         }
6503
6504         if (overflow && !err) {
6505                 block += count;
6506                 count = overflow;
6507                 ext4_mb_unload_buddy(&e4b);
6508                 /* The range changed so it's no longer validated */
6509                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6510                 goto do_more;
6511         }
6512
6513 error_clean:
6514         ext4_mb_unload_buddy(&e4b);
6515 error_out:
6516         ext4_std_error(sb, err);
6517 }
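
/*
 * Boundary example for the do_more loop above (illustrative): freeing
 * 100 blocks starting 40 blocks before the end of group N first frees
 * those 40 blocks in group N, then re-enters with block advanced by 40
 * and count = 60 to free the remainder in group N+1, revalidating the
 * range on each pass.
 */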
6518
6519 /**
6520  * ext4_free_blocks() -- Free given blocks and update quota
6521  * @handle:             handle for this transaction
6522  * @inode:              inode
6523  * @bh:                 optional buffer of the block to be freed
6524  * @block:              starting physical block to be freed
6525  * @count:              number of blocks to be freed
6526  * @flags:              flags used by ext4_free_blocks
6527  */
6528 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6529                       struct buffer_head *bh, ext4_fsblk_t block,
6530                       unsigned long count, int flags)
6531 {
6532         struct super_block *sb = inode->i_sb;
6533         unsigned int overflow;
6534         struct ext4_sb_info *sbi;
6535
6536         sbi = EXT4_SB(sb);
6537
6538         if (bh) {
6539                 if (block)
6540                         BUG_ON(block != bh->b_blocknr);
6541                 else
6542                         block = bh->b_blocknr;
6543         }
6544
6545         if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6546                 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6547                 return;
6548         }
6549
6550         might_sleep();
6551
6552         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6553             !ext4_inode_block_valid(inode, block, count)) {
6554                 ext4_error(sb, "Freeing blocks not in datazone - "
6555                            "block = %llu, count = %lu", block, count);
6556                 return;
6557         }
6558         flags |= EXT4_FREE_BLOCKS_VALIDATED;
6559
6560         ext4_debug("freeing block %llu\n", block);
6561         trace_ext4_free_blocks(inode, block, count, flags);
6562
6563         if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6564                 BUG_ON(count > 1);
6565
6566                 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6567                             inode, bh, block);
6568         }
6569
6570         /*
6571          * If the extent to be freed does not begin on a cluster
6572          * boundary, we need to deal with partial clusters at the
6573          * beginning and end of the extent.  Normally we will free
6574          * blocks at the beginning or the end unless we are explicitly
6575          * requested to avoid doing so.
6576          */
6577         overflow = EXT4_PBLK_COFF(sbi, block);
6578         if (overflow) {
6579                 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6580                         overflow = sbi->s_cluster_ratio - overflow;
6581                         block += overflow;
6582                         if (count > overflow)
6583                                 count -= overflow;
6584                         else
6585                                 return;
6586                 } else {
6587                         block -= overflow;
6588                         count += overflow;
6589                 }
6590                 /* The range changed so it's no longer validated */
6591                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6592         }
6593         overflow = EXT4_LBLK_COFF(sbi, count);
6594         if (overflow) {
6595                 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6596                         if (count > overflow)
6597                                 count -= overflow;
6598                         else
6599                                 return;
6600                 } else
6601                         count += sbi->s_cluster_ratio - overflow;
6602                 /* The range changed so it's no longer validated */
6603                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6604         }
6605
6606         if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6607                 int i;
6608                 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6609
6610                 for (i = 0; i < count; i++) {
6611                         cond_resched();
6612                         if (is_metadata)
6613                                 bh = sb_find_get_block(inode->i_sb, block + i);
6614                         ext4_forget(handle, is_metadata, inode, bh, block + i);
6615                 }
6616         }
6617
6618         ext4_mb_clear_bb(handle, inode, block, count, flags);
6619 }
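
/*
 * Sketch of a metadata call site (illustrative, not lifted from a real
 * caller; bh holds the single block being released):
 *
 *	ext4_free_blocks(handle, inode, bh, bh->b_blocknr, 1,
 *			 EXT4_FREE_BLOCKS_METADATA |
 *			 EXT4_FREE_BLOCKS_FORGET);
 *
 * FORGET hands the buffer to ext4_forget() before the block is freed;
 * METADATA keeps the free deferred via ext4_mb_free_metadata() until
 * the transaction commits, so the block cannot be reused while its old
 * contents could still be replayed from the journal.
 */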
6620
6621 /**
6622  * ext4_group_add_blocks() -- Add given blocks to an existing group
6623  * @handle:                     handle to this transaction
6624  * @sb:                         super block
6625  * @block:                      start physical block to add to the block group
6626  * @count:                      number of blocks to add
6627  *
6628  * This marks the blocks as free in the bitmap and buddy.
6629  */
6630 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6631                          ext4_fsblk_t block, unsigned long count)
6632 {
6633         ext4_group_t block_group;
6634         ext4_grpblk_t bit;
6635         struct ext4_sb_info *sbi = EXT4_SB(sb);
6636         struct ext4_buddy e4b;
6637         int err = 0;
6638         ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6639         ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6640         unsigned long cluster_count = last_cluster - first_cluster + 1;
6641         ext4_grpblk_t changed;
6642
6643         ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6644
6645         if (cluster_count == 0)
6646                 return 0;
6647
6648         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6649         /*
6650          * Check to see if we are freeing blocks across a group
6651          * boundary.
6652          */
6653         if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6654                 ext4_warning(sb, "too many blocks added to group %u",
6655                              block_group);
6656                 err = -EINVAL;
6657                 goto error_out;
6658         }
6659
6660         err = ext4_mb_load_buddy(sb, block_group, &e4b);
6661         if (err)
6662                 goto error_out;
6663
6664         if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6665                 ext4_error(sb, "Adding blocks in system zones - "
6666                            "Block = %llu, count = %lu",
6667                            block, count);
6668                 err = -EINVAL;
6669                 goto error_clean;
6670         }
6671
6672         err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6673                                    cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
6674                                    &changed);
6675         if (err && changed == 0)
6676                 goto error_clean;
6677
6678         if (changed != cluster_count)
6679                 ext4_error(sb, "bit already cleared in group %u", block_group);
6680
6681         ext4_lock_group(sb, block_group);
6682         mb_free_blocks(NULL, &e4b, bit, cluster_count);
6683         ext4_unlock_group(sb, block_group);
6684         percpu_counter_add(&sbi->s_freeclusters_counter,
6685                            changed);
6686
6687 error_clean:
6688         ext4_mb_unload_buddy(&e4b);
6689 error_out:
6690         ext4_std_error(sb, err);
6691         return err;
6692 }
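/*
 * Standalone sketch (userspace C, assumed geometry): the
 * ext4_get_group_no_and_offset() step above is, in essence, a div/mod by
 * the per-group block count after subtracting the first data block. The
 * constants are illustrative, not read from a superblock.
 */
#include <assert.h>

#define FIRST_DATA_BLOCK        1UL     /* typical for 1KiB blocks */
#define BLOCKS_PER_GROUP        8192UL

static void get_group_and_bit(unsigned long block,
                              unsigned long *group, unsigned long *bit)
{
        unsigned long rel = block - FIRST_DATA_BLOCK;

        *group = rel / BLOCKS_PER_GROUP;        /* which block group */
        *bit   = rel % BLOCKS_PER_GROUP;        /* offset inside the group */
}

int main(void)
{
        unsigned long group, bit;

        get_group_and_bit(16385, &group, &bit);
        assert(group == 2 && bit == 0);         /* (16385 - 1) / 8192 == 2 */
        return 0;
}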
6693
6694 /**
6695  * ext4_trim_extent -- function to TRIM a single free extent in the group
6696  * @sb:         super block for the file system
6697  * @start:      starting block of the free extent in the alloc. group
6698  * @count:      number of blocks to TRIM
6699  * @e4b:        ext4 buddy for the group
6700  *
6701  * Trim "count" blocks starting at "start" in the "group". To ensure that no
6702  * one will allocate those blocks, mark them as used in the buddy bitmap.
6703  * This must be called under the group lock.
6704  */
6705 static int ext4_trim_extent(struct super_block *sb,
6706                 int start, int count, struct ext4_buddy *e4b)
6707 __releases(bitlock)
6708 __acquires(bitlock)
6709 {
6710         struct ext4_free_extent ex;
6711         ext4_group_t group = e4b->bd_group;
6712         int ret = 0;
6713
6714         trace_ext4_trim_extent(sb, group, start, count);
6715
6716         assert_spin_locked(ext4_group_lock_ptr(sb, group));
6717
6718         ex.fe_start = start;
6719         ex.fe_group = group;
6720         ex.fe_len = count;
6721
6722         /*
6723          * Mark blocks used, so no one can reuse them while
6724          * being trimmed.
6725          */
6726         mb_mark_used(e4b, &ex);
6727         ext4_unlock_group(sb, group);
6728         ret = ext4_issue_discard(sb, group, start, count, NULL);
6729         ext4_lock_group(sb, group);
6730         mb_free_blocks(NULL, e4b, start, ex.fe_len);
6731         return ret;
6732 }
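/*
 * Standalone sketch (userspace C with pthreads, compile with -pthread):
 * the lock choreography in ext4_trim_extent() above. Mark the range busy
 * under the lock, drop the lock for the slow discard, then retake it to
 * mark the range free again. A mutex stands in for the group spinlock and
 * all names are hypothetical; the sketch simplifies by taking the lock
 * itself, whereas in the kernel the caller already holds it.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static bool range_busy;

static int slow_discard(void)
{
        return 0;                       /* a blocking device command, elided */
}

static int trim_extent_pattern(void)
{
        int ret;

        pthread_mutex_lock(&group_lock);
        range_busy = true;              /* mb_mark_used() analogue */
        pthread_mutex_unlock(&group_lock);

        ret = slow_discard();           /* may sleep; lock must not be held */

        pthread_mutex_lock(&group_lock);
        range_busy = false;             /* mb_free_blocks() analogue */
        pthread_mutex_unlock(&group_lock);
        return ret;
}

int main(void)
{
        return trim_extent_pattern();
}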
6733
6734 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6735                                            ext4_group_t grp)
6736 {
6737         unsigned long nr_clusters_in_group;
6738
6739         if (grp < (ext4_get_groups_count(sb) - 1))
6740                 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
6741         else
6742                 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6743                                         ext4_group_first_block_no(sb, grp))
6744                                        >> EXT4_CLUSTER_BITS(sb);
6745
6746         return nr_clusters_in_group - 1;
6747 }
6748
6749 static bool ext4_trim_interrupted(void)
6750 {
6751         return fatal_signal_pending(current) || freezing(current);
6752 }
6753
6754 static int ext4_try_to_trim_range(struct super_block *sb,
6755                 struct ext4_buddy *e4b, ext4_grpblk_t start,
6756                 ext4_grpblk_t max, ext4_grpblk_t minblocks)
6757 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6758 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6759 {
6760         ext4_grpblk_t next, count, free_count, last, origin_start;
6761         bool set_trimmed = false;
6762         void *bitmap;
6763
6764         last = ext4_last_grp_cluster(sb, e4b->bd_group);
6765         bitmap = e4b->bd_bitmap;
6766         if (start == 0 && max >= last)
6767                 set_trimmed = true;
6768         origin_start = start;
6769         start = max(e4b->bd_info->bb_first_free, start);
6770         count = 0;
6771         free_count = 0;
6772
6773         while (start <= max) {
6774                 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6775                 if (start > max)
6776                         break;
6777
6778                 next = mb_find_next_bit(bitmap, last + 1, start);
6779                 if (origin_start == 0 && next >= last)
6780                         set_trimmed = true;
6781
6782                 if ((next - start) >= minblocks) {
6783                         int ret = ext4_trim_extent(sb, start, next - start, e4b);
6784
6785                         if (ret && ret != -EOPNOTSUPP)
6786                                 return count;
6787                         count += next - start;
6788                 }
6789                 free_count += next - start;
6790                 start = next + 1;
6791
6792                 if (ext4_trim_interrupted())
6793                         return count;
6794
6795                 if (need_resched()) {
6796                         ext4_unlock_group(sb, e4b->bd_group);
6797                         cond_resched();
6798                         ext4_lock_group(sb, e4b->bd_group);
6799                 }
6800
6801                 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6802                         break;
6803         }
6804
6805         if (set_trimmed)
6806                 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6807
6808         return count;
6809 }
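/*
 * Standalone sketch (userspace C): the scan loop above alternates between
 * the next zero bit (start of a free extent) and the next set bit (its
 * end). A byte-per-bit array stands in for the kernel bitmap and for
 * mb_find_next_zero_bit()/mb_find_next_bit().
 */
#include <stdio.h>

#define NBITS 16

/* Return the index of the next bit equal to val, or limit if none. */
static int next_bit(const unsigned char *map, int limit, int from, int val)
{
        while (from < limit && map[from] != val)
                from++;
        return from;
}

int main(void)
{
        /* 1 = in use, 0 = free; the free extents are [2,4] and [9,12] */
        unsigned char map[NBITS] = { 1,1,0,0,0,1,1,1,1,0,0,0,0,1,1,1 };
        int start = 0, next;

        while (start < NBITS) {
                start = next_bit(map, NBITS, start, 0);
                if (start >= NBITS)
                        break;
                next = next_bit(map, NBITS, start, 1);
                printf("free extent: [%d, %d], len %d\n",
                       start, next - 1, next - start);
                start = next + 1;
        }
        return 0;
}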
6810
6811 /**
6812  * ext4_trim_all_free -- function to trim all free space in alloc. group
6813  * @sb:                 super block for file system
6814  * @group:              group to be trimmed
6815  * @start:              first group block to examine
6816  * @max:                last group block to examine
6817  * @minblocks:          minimum extent block count
6818  *
6819  * ext4_trim_all_free walks through the group's block bitmap searching for
6820  * free extents. When a free extent is found, it is marked as used in the
6821  * group buddy bitmap, a TRIM command is issued on the extent, and the
6822  * extent is then freed again in the buddy bitmap.
6823  */
6824 static ext4_grpblk_t
6825 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6826                    ext4_grpblk_t start, ext4_grpblk_t max,
6827                    ext4_grpblk_t minblocks)
6828 {
6829         struct ext4_buddy e4b;
6830         int ret;
6831
6832         trace_ext4_trim_all_free(sb, group, start, max);
6833
6834         ret = ext4_mb_load_buddy(sb, group, &e4b);
6835         if (ret) {
6836                 ext4_warning(sb, "Error %d loading buddy information for %u",
6837                              ret, group);
6838                 return ret;
6839         }
6840
6841         ext4_lock_group(sb, group);
6842
6843         if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6844             minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6845                 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6846         else
6847                 ret = 0;
6848
6849         ext4_unlock_group(sb, group);
6850         ext4_mb_unload_buddy(&e4b);
6851
6852         ext4_debug("trimmed %d blocks in the group %d\n",
6853                 ret, group);
6854
6855         return ret;
6856 }
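/*
 * Sketch of the cut-off above (hypothetical helper, plain C): once a group
 * has been fully trimmed, it is re-trimmed only when the new request uses
 * a smaller, i.e. stricter, minimum extent than the last completed pass.
 */
#include <assert.h>
#include <stdbool.h>

static bool should_retrim(bool was_trimmed, unsigned long last_minblks,
                          unsigned long minblks)
{
        /* trim again if never trimmed, or if this request is stricter */
        return !was_trimmed || minblks < last_minblks;
}

int main(void)
{
        assert(should_retrim(false, 0, 64));    /* never trimmed yet */
        assert(should_retrim(true, 64, 16));    /* stricter request */
        assert(!should_retrim(true, 16, 64));   /* already trimmed finer */
        return 0;
}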
6857
6858 /**
6859  * ext4_trim_fs() -- trim ioctl handler function
6860  * @sb:                 superblock for filesystem
6861  * @range:              fstrim_range structure
6862  *
6863  * start:       first byte to trim
6864  * len:         number of bytes to trim from start
6865  * minlen:      minimum extent length in bytes
6866  * ext4_trim_fs goes through all allocation groups containing bytes from
6867  * start to start+len. For each such group, ext4_trim_all_free() is
6868  * invoked to trim all free space.
6869  */
6870 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6871 {
6872         unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6873         struct ext4_group_info *grp;
6874         ext4_group_t group, first_group, last_group;
6875         ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6876         uint64_t start, end, minlen, trimmed = 0;
6877         ext4_fsblk_t first_data_blk =
6878                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6879         ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6880         int ret = 0;
6881
6882         start = range->start >> sb->s_blocksize_bits;
6883         end = start + (range->len >> sb->s_blocksize_bits) - 1;
6884         minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6885                               range->minlen >> sb->s_blocksize_bits);
6886
6887         if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6888             start >= max_blks ||
6889             range->len < sb->s_blocksize)
6890                 return -EINVAL;
6891         /* No point in trying to trim less than the discard granularity */
6892         if (range->minlen < discard_granularity) {
6893                 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6894                                 discard_granularity >> sb->s_blocksize_bits);
6895                 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6896                         goto out;
6897         }
6898         if (end >= max_blks - 1)
6899                 end = max_blks - 1;
6900         if (end <= first_data_blk)
6901                 goto out;
6902         if (start < first_data_blk)
6903                 start = first_data_blk;
6904
6905         /* Determine first and last group to examine based on start and end */
6906         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6907                                      &first_group, &first_cluster);
6908         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6909                                      &last_group, &last_cluster);
6910
6911         /* end now represents the last cluster to discard in this group */
6912         end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6913
6914         for (group = first_group; group <= last_group; group++) {
6915                 if (ext4_trim_interrupted())
6916                         break;
6917                 grp = ext4_get_group_info(sb, group);
6918                 if (!grp)
6919                         continue;
6920                 /* We only do this if the grp has never been initialized */
6921                 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6922                         ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6923                         if (ret)
6924                                 break;
6925                 }
6926
6927                 /*
6928                  * For all groups except the last one, the last cluster is
6929                  * always EXT4_CLUSTERS_PER_GROUP(sb) - 1, so we only need
6930                  * to change it for the last group; note that last_cluster
6931                  * was already computed by ext4_get_group_no_and_offset().
6932                  */
6933                 if (group == last_group)
6934                         end = last_cluster;
6935                 if (grp->bb_free >= minlen) {
6936                         cnt = ext4_trim_all_free(sb, group, first_cluster,
6937                                                  end, minlen);
6938                         if (cnt < 0) {
6939                                 ret = cnt;
6940                                 break;
6941                         }
6942                         trimmed += cnt;
6943                 }
6944
6945                 /*
6946                  * For every group except the first one, we are sure
6947                  * that the first cluster to discard will be cluster #0.
6948                  */
6949                 first_cluster = 0;
6950         }
6951
6952         if (!ret)
6953                 EXT4_SB(sb)->s_last_trim_minblks = minlen;
6954
6955 out:
6956         range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6957         return ret;
6958 }
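/*
 * Standalone sketch (userspace C, assumed geometry of 4KiB blocks and one
 * block per cluster, i.e. no bigalloc): the unit conversions at the top of
 * ext4_trim_fs(). EXT4_NUM_B2C rounds a block count up to whole clusters,
 * modeled here as a round-up division.
 */
#include <stdio.h>
#include <stdint.h>

#define BLOCKSIZE_BITS  12      /* 4096-byte blocks */
#define CLUSTER_RATIO   1       /* blocks per cluster */

int main(void)
{
        uint64_t range_start  = 1 << 20;        /* byte offset, 1MiB */
        uint64_t range_len    = 8 << 20;        /* 8MiB */
        uint64_t range_minlen = 65536;          /* 64KiB */

        uint64_t start = range_start >> BLOCKSIZE_BITS;
        uint64_t end = start + (range_len >> BLOCKSIZE_BITS) - 1;
        uint64_t minlen = ((range_minlen >> BLOCKSIZE_BITS) +
                           CLUSTER_RATIO - 1) / CLUSTER_RATIO;

        printf("blocks %llu..%llu, min extent %llu clusters\n",
               (unsigned long long)start, (unsigned long long)end,
               (unsigned long long)minlen);
        /* prints: blocks 256..2303, min extent 16 clusters */
        return 0;
}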
6959
6960 /* Iterate all the free extents in the group. */
6961 int
6962 ext4_mballoc_query_range(
6963         struct super_block              *sb,
6964         ext4_group_t                    group,
6965         ext4_grpblk_t                   start,
6966         ext4_grpblk_t                   end,
6967         ext4_mballoc_query_range_fn     formatter,
6968         void                            *priv)
6969 {
6970         void                            *bitmap;
6971         ext4_grpblk_t                   next;
6972         struct ext4_buddy               e4b;
6973         int                             error;
6974
6975         error = ext4_mb_load_buddy(sb, group, &e4b);
6976         if (error)
6977                 return error;
6978         bitmap = e4b.bd_bitmap;
6979
6980         ext4_lock_group(sb, group);
6981
6982         start = max(e4b.bd_info->bb_first_free, start);
6983         if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6984                 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6985
6986         while (start <= end) {
6987                 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6988                 if (start > end)
6989                         break;
6990                 next = mb_find_next_bit(bitmap, end + 1, start);
6991
6992                 ext4_unlock_group(sb, group);
6993                 error = formatter(sb, group, start, next - start, priv);
6994                 if (error)
6995                         goto out_unload;
6996                 ext4_lock_group(sb, group);
6997
6998                 start = next + 1;
6999         }
7000
7001         ext4_unlock_group(sb, group);
7002 out_unload:
7003         ext4_mb_unload_buddy(&e4b);
7004
7005         return error;
7006 }
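/*
 * Sketch of a formatter callback for the iterator above (hypothetical; not
 * a callback that exists in the tree): it is invoked once per free extent
 * with the group lock dropped, and a non-zero return stops the walk. The
 * accumulator in @priv is owned by the caller.
 */
static int count_free_clusters_fn(struct super_block *sb, ext4_group_t group,
                                  ext4_grpblk_t start, ext4_grpblk_t len,
                                  void *priv)
{
        unsigned long *total = priv;

        *total += len;          /* tally the free extent's length */
        return 0;               /* keep iterating */
}

/*
 * Assumed usage from caller context:
 *
 *      unsigned long total = 0;
 *      err = ext4_mballoc_query_range(sb, group, 0,
 *                                     EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *                                     count_free_clusters_fn, &total);
 */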
7007
7008 #ifdef CONFIG_EXT4_KUNIT_TESTS
7009 #include "mballoc-test.c"
7010 #endif