/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/ktime.h>

#include "trace_gfs2.h"
/* This doesn't need to be that large as max 64 bit pointers in a 4k
 * block is 512, so __u16 is fine for that. It saves stack space to
 * keep it small.
 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
	int mp_fheight; /* find_metapath height */
	int mp_aheight; /* actual height (lookup height) */
};

static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;

	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > gfs2_max_stuffed_size(ip))
			dsize = gfs2_max_stuffed_size(ip);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);

		SetPageUptodate(page);

	if (!page_has_buffers(page))
		create_empty_buffers(page, BIT(inode->i_blkbits),
				     BIT(BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (gfs2_is_jdata(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	mark_buffer_dirty(bh);
	gfs2_ordered_add_inode(ip);
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;

	int isdir = gfs2_is_dir(ip);

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);

		gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
		error = gfs2_dir_get_new_buffer(ip, block, &bh);

		gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
				      dibh, sizeof(struct gfs2_dinode));

		error = gfs2_unstuffer_page(ip, dibh, block, page);

	/* Set up the pointer to the new block */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));

	di->di_height = cpu_to_be16(1);

	up_write(&ip->i_rw_mutex);
/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine returns a struct metapath structure that defines a path
 * through the metadata of inode "ip" to get to block "block".
 *
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would return a struct metapath structure set to:
 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode. We would then load the indirect block pointed to by
 * pointer 48 in that indirect block. We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 *   ----------------------------------------
 *   | Dinode         | ptr 0 | ...          |
 *   ----------------------------------------
 *                        |
 *                        V
 *   ----------------------------------------
 *   | Indirect block | ... | ptr 48 | ...   |
 *   ----------------------------------------
 *                              |
 *                              V
 *   ----------------------------------------
 *   | Indirect block | ... | ptr 165 | ...  |
 *   ----------------------------------------
 *                               |
 *                               V
 *   ----------------------------------------
 *   | Data block containing offset          |
 *   |           101342453                   |
 *   ----------------------------------------
 *
 */
static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	mp->mp_fheight = height;
	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
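
/*
 * Worked sketch (for illustration only; this helper is not part of the
 * driver and is never called): for a height-3 tree, the loop in
 * find_metapath() above peels one index off "block" per height,
 * deepest height first.
 */
static inline void find_metapath_example(const struct gfs2_sbd *sdp, u64 block,
					 __u16 list[GFS2_MAX_META_HEIGHT])
{
	list[2] = do_div(block, sdp->sd_inptrs);	/* index at height 2 */
	list[1] = do_div(block, sdp->sd_inptrs);	/* index at height 1 */
	list[0] = do_div(block, sdp->sd_inptrs);	/* index in the dinode */
}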
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)

/**
 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 */
static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	if (height == 0)
		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
}
/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */
static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	__be64 *p = metaptr1(height, mp);
	return p + mp->mp_list[height];
}

static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
{
	const struct buffer_head *bh = mp->mp_bh[height];
	return (const __be64 *)(bh->b_data + bh->b_size);
}
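
/*
 * clone_metapath - make a copy of a metapath, taking an extra reference
 * on each buffer that the source path has read in so far
 */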
static void clone_metapath(struct metapath *clone, struct metapath *mp)
{
	unsigned int hgt;

	*clone = *mp;
	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
		get_bh(clone->mp_bh[hgt]);
}
static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
	for (t = start; t < end; t++) {
		struct buffer_head *rabh;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(REQ_OP_READ,
					  REQ_RAHEAD | REQ_META | REQ_PRIO,
					  rabh);
static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
			     unsigned int x, unsigned int h)
{
		__be64 *ptr = metapointer(x, mp);
		u64 dblock = be64_to_cpu(*ptr);

		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);

	mp->mp_aheight = x + 1;
/**
 * lookup_metapath - Walk the metadata tree to a specific point
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}
/**
 * fillup_metapath - fill up buffers for the metadata path to a specific height
 * @h: The height to which it should be mapped
 *
 * Similar to lookup_metapath, but does lookups for a range of heights
 *
 * Returns: error or the number of buffers filled
 */

static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
	/* find the first buffer we need to look up. */
	for (x = h - 1; x > 0; x--) {

	ret = __fillup_metapath(ip, mp, x, h);

	return mp->mp_aheight - x - 1;
static void release_metapath(struct metapath *mp)
{
	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @bh: The metadata block
 * @ptr: Current position in @bh
 * @limit: Max extent length to return
 * @eob: Set to 1 if we hit "end of block"
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	} while(be64_to_cpu(*ptr) == d);
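
/*
 * A gfs2_metadata_walker is invoked by gfs2_walk_metadata() for each
 * range of indirect pointers it visits.  A walker returns WALK_STOP to
 * abort the walk, WALK_NEXT to continue with the next range, or a
 * pointer into the current range to descend into the tree at that
 * position (see gfs2_hole_walker below).
 */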
typedef const __be64 *(*gfs2_metadata_walker)(
		struct metapath *mp,
		const __be64 *start, const __be64 *end,
		u64 factor, void *data);

#define WALK_STOP ((__be64 *)0)
#define WALK_NEXT ((__be64 *)1)
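
/*
 * Minimal example walker (a sketch for illustration only; not used by
 * the driver): count the allocated, i.e. non-null, pointers in every
 * range handed to it, always moving on to the next range.
 */
static inline const __be64 *gfs2_count_walker(struct metapath *mp,
					      const __be64 *start,
					      const __be64 *end,
					      u64 factor, void *data)
{
	u64 *count = data;
	const __be64 *ptr;

	for (ptr = start; ptr < end; ptr++)
		if (*ptr)
			(*count)++;
	return WALK_NEXT;
}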
static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
		u64 len, struct metapath *mp, gfs2_metadata_walker walker,
		void *data)
{
	struct metapath clone;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const __be64 *start, *end, *ptr;

	for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
		factor *= sdp->sd_inptrs;

		/* Walk indirect block. */
		start = metapointer(hgt, mp);
		end = metaend(hgt, mp);

		step = (end - start) * factor;
		if (step > len)
			end = start + DIV_ROUND_UP_ULL(len, factor);

		ptr = walker(mp, start, end, factor, data);
		if (ptr == WALK_STOP)
			break;

		if (ptr != WALK_NEXT) {
			mp->mp_list[hgt] += ptr - start;
			goto fill_up_metapath;
		}

		/* Decrease height of metapath. */
		clone_metapath(&clone, mp);

			brelse(mp->mp_bh[hgt]);
			mp->mp_bh[hgt] = NULL;

			factor *= sdp->sd_inptrs;

		/* Advance in metadata tree. */
		(mp->mp_list[hgt])++;
		start = metapointer(hgt, mp);
		end = metaend(hgt, mp);

			mp->mp_list[hgt] = 0;

fill_up_metapath:
		/* Increase height of metapath. */
		clone_metapath(&clone, mp);

		ret = fillup_metapath(ip, mp, ip->i_height - 1);

		do_div(factor, sdp->sd_inptrs);
		mp->mp_aheight = hgt + 1;

	release_metapath(mp);
struct gfs2_hole_walker_args {
	u64 blocks;
};

static const __be64 *gfs2_hole_walker(struct metapath *mp,
		const __be64 *start, const __be64 *end,
		u64 factor, void *data)
{
	struct gfs2_hole_walker_args *args = data;
	const __be64 *ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr) {
			args->blocks += (ptr - start) * factor;
			if (mp->mp_aheight == mp->mp_fheight)
				return WALK_STOP;
			return ptr;  /* increase height */
		}
	}

	args->blocks += (end - start) * factor;
	return WALK_NEXT;
}
/**
 * gfs2_hole_size - figure out the size of a hole
 * @inode: The inode
 * @lblock: The logical starting block number
 * @len: How far to look (in blocks)
 * @mp: The metapath at lblock
 * @iomap: The iomap to store the hole size in
 *
 * This function modifies @mp.
 *
 * Returns: errno on error
 */
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
			  struct metapath *mp, struct iomap *iomap)
{
	struct gfs2_hole_walker_args args = { };
	int ret;

	ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
	if (!ret)
		iomap->length = args.blocks << inode->i_blkbits;
	return ret;
}
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
				 sizeof(struct gfs2_dinode)));

	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));

	*ptr = cpu_to_be64(bn);
enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};
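
/*
 * The states above are visited in order by gfs2_iomap_alloc() below: an
 * allocation that must grow the tree height starts in ALLOC_GROW_HEIGHT,
 * moves to ALLOC_GROW_DEPTH once the new top of the tree is in place,
 * and finishes in ALLOC_DATA; allocations into an existing tree start
 * further along that sequence.
 */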
/**
 * gfs2_iomap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @iomap: The iomap structure
 * @flags: iomap flags
 * @mp: The metapath, with proper height information calculated
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * This function is called after gfs2_iomap_get, which works out the
 * total number of blocks which we need via gfs2_alloc_size.
 *
 * We then do the actual allocation asking for an extent at a time (if
 * enough contiguous free blocks are available, there will only be one
 * allocation request per call) and use the state machine to initialise
 * the blocks in order.
 *
 * Right now, this function will allocate at most one indirect block
 * worth of data -- with a default block size of 4K, that's slightly
 * less than 2M. If this limitation is ever removed to allow huge
 * allocations, we would probably still want to limit the iomap size we
 * return to avoid stalling other tasks during huge writes; the next
 * iomap iteration would then find the blocks already allocated.
 *
 * Returns: errno on error
 */
static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
			    unsigned flags, struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];

	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	size_t dblks = iomap->length >> inode->i_blkbits;
	const unsigned end_of_metadata = mp->mp_fheight - 1;

	enum alloc_state state;

	BUG_ON(mp->mp_aheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	down_write(&ip->i_rw_mutex);

	if (mp->mp_fheight == mp->mp_aheight) {
		/* Bottom indirect block exists */

		/* Need to allocate indirect blocks */
		if (mp->mp_fheight == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = mp->mp_fheight - mp->mp_aheight;
			state = ALLOC_GROW_DEPTH;

			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = mp->mp_fheight - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (mp->mp_fheight - branch_start);

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	do {
		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);

		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);

		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));

			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
			     i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == mp->mp_fheight - ip->i_height) {

				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));

				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
						 sizeof(struct gfs2_meta_header));

				state = ALLOC_GROW_DEPTH;
				for (i = branch_start; i < mp->mp_fheight; i++) {
					if (mp->mp_bh[i] == NULL)
						continue;
					brelse(mp->mp_bh[i]);

		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < mp->mp_fheight)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < mp->mp_fheight && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == mp->mp_fheight)

		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);

			ptr = metapointer(end_of_metadata, mp);
			iomap->addr = bn << inode->i_blkbits;
			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;

				*ptr++ = cpu_to_be64(bn++);

	} while (iomap->addr == IOMAP_NULL_ADDR);

	iomap->type = IOMAP_MAPPED;
	iomap->length = (u64)dblks << inode->i_blkbits;
	ip->i_height = mp->mp_fheight;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);
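
/*
 * IOMAP_F_GFS2_BOUNDARY marks a mapping that ends at a metadata boundary:
 * a metadata read will be needed before the following block can be mapped.
 * gfs2_iomap_get() sets it, and gfs2_block_map() translates it into
 * buffer_boundary().
 */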
#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
/**
 * gfs2_alloc_size - Compute the maximum allocation size
 * @inode: The inode
 * @mp: The metapath
 * @size: Requested size in blocks
 *
 * Compute the maximum size of the next allocation at @mp.
 *
 * Returns: size in blocks
 */
static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const __be64 *first, *ptr, *end;

	/*
	 * For writes to stuffed files, this function is called twice via
	 * gfs2_iomap_get, before and after unstuffing. The size we return the
	 * first time needs to be large enough to get the reservation and
	 * allocation sizes right. The size we return the second time must
	 * be exact or else gfs2_iomap_alloc won't do the right thing.
	 */

	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
		unsigned int maxsize = mp->mp_fheight > 1 ?
			sdp->sd_inptrs : sdp->sd_diptrs;
		maxsize -= mp->mp_list[mp->mp_fheight - 1];

	first = metapointer(ip->i_height - 1, mp);
	end = metaend(ip->i_height - 1, mp);
	if (end - first > size)
		end = first + size;
	for (ptr = first; ptr < end; ptr++) {
/**
 * gfs2_iomap_get - Map blocks from an inode to disk blocks
 * @inode: The inode
 * @pos: Starting position in bytes
 * @length: Length to map, in bytes
 * @flags: iomap flags
 * @iomap: The iomap structure
 * @mp: The metapath
 *
 * Returns: errno
 */
static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
			  unsigned flags, struct iomap *iomap,
			  struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t size = i_size_read(inode);
	__be64 *ptr;
	sector_t lblock;
	sector_t lblock_stop;
	int ret;
	int eob;
	u64 len;
	struct buffer_head *dibh = NULL, *bh;
	u8 height;

	down_read(&ip->i_rw_mutex);

	ret = gfs2_meta_inode_buffer(ip, &dibh);

	if (gfs2_is_stuffed(ip)) {
		if (flags & IOMAP_WRITE) {
			loff_t max_size = gfs2_max_stuffed_size(ip);

			if (pos + length > max_size)

			iomap->length = max_size;

			if (flags & IOMAP_REPORT) {

				iomap->length = length;

			iomap->length = size;

		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
			      sizeof(struct gfs2_dinode);
		iomap->type = IOMAP_INLINE;
		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
		goto out;
	}

	lblock = pos >> inode->i_blkbits;
	iomap->offset = lblock << inode->i_blkbits;
	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
	len = lblock_stop - lblock + 1;
	iomap->length = len << inode->i_blkbits;

	height = ip->i_height;
	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
		height++;
	find_metapath(sdp, lblock, mp, height);
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;

	ret = lookup_metapath(ip, mp);

	if (mp->mp_aheight != ip->i_height)
		goto do_alloc;

	ptr = metapointer(ip->i_height - 1, mp);
	if (*ptr == 0)
		goto do_alloc;

	bh = mp->mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh, ptr, len, &eob);

	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
	iomap->length = len << inode->i_blkbits;
	iomap->type = IOMAP_MAPPED;
	iomap->flags |= IOMAP_F_MERGED;
	if (eob)
		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;

out:
	iomap->bdev = inode->i_sb->s_bdev;

	up_read(&ip->i_rw_mutex);
	return ret;

do_alloc:
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	if (flags & IOMAP_REPORT) {
		if (pos >= size)
			ret = -ENOENT;
		else if (height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
		else
			iomap->length = size - pos;
	} else if (flags & IOMAP_WRITE) {
		u64 alloc_size;

		if (flags & IOMAP_DIRECT)
			goto out;	/* (see gfs2_file_direct_write) */

		len = gfs2_alloc_size(inode, mp, len);
		alloc_size = len << inode->i_blkbits;
		if (alloc_size < iomap->length)
			iomap->length = alloc_size;
	} else {
		if (pos < size && height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
	}
	goto out;
}
static int gfs2_write_lock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);

	if (&ip->i_inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);

	gfs2_glock_dq(&ip->i_gh);

	gfs2_holder_uninit(&ip->i_gh);

static void gfs2_write_unlock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (&ip->i_inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		gfs2_glock_dq_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq_uninit(&ip->i_gh);
}
static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
					   unsigned copied, struct page *page,
					   struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
}
static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
				  loff_t length, unsigned flags,
				  struct iomap *iomap, struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	bool unstuff, alloc_required;

	ret = gfs2_write_lock(inode);

	unstuff = gfs2_is_stuffed(ip) &&
		  pos + length > gfs2_max_stuffed_size(ip);

	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);

	alloc_required = unstuff || iomap->type == IOMAP_HOLE;

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
				       &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = {
			.target = data_blocks + ind_blocks
		};

		ret = gfs2_quota_lock_check(ip, &ap);

		ret = gfs2_inplace_reserve(ip, &ap);

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);

	ret = gfs2_trans_begin(sdp, rblocks, iomap->length >> inode->i_blkbits);
	if (ret)
		goto out_trans_fail;

	if (unstuff) {
		ret = gfs2_unstuff_dinode(ip, NULL);

		release_metapath(mp);
		ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
				     flags, iomap, mp);

	if (iomap->type == IOMAP_HOLE) {
		ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
		if (ret) {
			gfs2_trans_end(sdp);
			gfs2_inplace_release(ip);
			punch_hole(ip, iomap->offset, iomap->length);

	if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
		iomap->page_done = gfs2_iomap_journaled_page_done;

out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_write_unlock(inode);
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct metapath mp = { .mp_aheight = 1, };

	iomap->flags |= IOMAP_F_BUFFER_HEAD;

	trace_gfs2_iomap_start(ip, pos, length, flags);
	if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
		ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
	} else {
		ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);

		/*
		 * Silently fall back to buffered I/O for stuffed files or if
		 * we've got a hole (see gfs2_file_direct_write).
		 */
		if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
		    iomap->type != IOMAP_MAPPED)
			ret = -ENOTBLK;

	get_bh(mp.mp_bh[0]);
	iomap->private = mp.mp_bh[0];

	release_metapath(&mp);
	trace_gfs2_iomap_end(ip, iomap, ret);
static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_trans *tr = current->journal_info;
	struct buffer_head *dibh = iomap->private;

	if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
		goto out;

	if (iomap->type != IOMAP_INLINE) {
		gfs2_ordered_add_inode(ip);

		if (tr->tr_num_buf_new)
			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		else
			gfs2_trans_add_meta(ip->i_gl, dibh);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);

	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
		/* Deallocate blocks that were just allocated. */
		loff_t blockmask = i_blocksize(inode) - 1;
		loff_t end = (pos + length) & ~blockmask;

		pos = (pos + written + blockmask) & ~blockmask;
		if (pos < end) {
			truncate_pagecache_range(inode, pos, end - 1);
			punch_hole(ip, pos, end - pos);
		}
	}

out:
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	gfs2_write_unlock(inode);
const struct iomap_ops gfs2_iomap_ops = {
	.iomap_begin = gfs2_iomap_begin,
	.iomap_end = gfs2_iomap_end,
};
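
/*
 * Usage sketch (illustrative only; this helper is not part of the file
 * and is never called): the ops vector above is what callers hand to
 * the generic iomap helpers, for example to implement FIEMAP:
 */
static inline int gfs2_fiemap_example(struct inode *inode,
				      struct fiemap_extent_info *fieinfo,
				      u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
}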
/**
 * gfs2_block_map - Map one or more blocks of an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
 *
 * The size of the requested mapping is defined in bh_map->b_size.
 *
 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
 * bh_map->b_size to indicate the size of the mapping when @lblock and
 * successive blocks are mapped, up to the requested size.
 *
 * Sets buffer_boundary() if a read of metadata will be required
 * before the next block can be mapped. Sets buffer_new() if new
 * blocks were allocated.
 *
 * Returns: errno
 */
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t pos = (loff_t)lblock << inode->i_blkbits;
	loff_t length = bh_map->b_size;
	struct metapath mp = { .mp_aheight = 1, };
	struct iomap iomap = { };
	int ret;

	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);

	if (create) {
		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
		if (!ret && iomap.type == IOMAP_HOLE)
			ret = gfs2_iomap_alloc(inode, &iomap, IOMAP_WRITE, &mp);
		release_metapath(&mp);
	} else {
		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
		release_metapath(&mp);
	}

	if (iomap.length > bh_map->b_size) {
		iomap.length = bh_map->b_size;
		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
	}
	if (iomap.addr != IOMAP_NULL_ADDR)
		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
	bh_map->b_size = iomap.length;
	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
		set_buffer_boundary(bh_map);
	if (iomap.flags & IOMAP_F_NEW)
		set_buffer_new(bh_map);

	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}
/**
 * gfs2_block_zero_range - Deal with zeroing out data
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_zero_range(struct inode *inode, loff_t from,
				 unsigned int length)
{
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;

	page = find_or_create_page(mapping, index, GFP_NOFS);

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (gfs2_is_jdata(ip))
		gfs2_trans_add_data(ip->i_gl, bh);
	else
		gfs2_ordered_add_inode(ip);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	put_page(page);
#define GFS2_JTRUNC_REVOKES 8192

/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */
static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;

	while (oldsize != newsize) {
		struct gfs2_trans *tr;
		unsigned int offs;

		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = oldsize & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);
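
		/*
		 * Note (inferred intent): after the first, possibly
		 * unaligned, chunk, every truncate point is page aligned,
		 * so the page cache below is always trimmed in whole pages.
		 */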
		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
static int trunc_start(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = NULL;
	int journaled = gfs2_is_jdata(ip);
	u64 oldsize = inode->i_size;
	int error;

	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		unsigned int blocksize = i_blocksize(inode);
		unsigned int offs = newsize & (blocksize - 1);

		error = gfs2_block_zero_range(inode, newsize,
					      blocksize - offs);

		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

	if (current->journal_info)
		gfs2_trans_end(sdp);
int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
			 struct iomap *iomap)
{
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
	if (!ret && iomap->type == IOMAP_HOLE)
		ret = gfs2_iomap_alloc(inode, iomap, IOMAP_WRITE, &mp);
	release_metapath(&mp);
	return ret;
}
/**
 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
 * @ip: The inode
 * @rd_gh: holder of resource group glock
 * @bh: buffer head to sweep
 * @start: starting point in bh
 * @end: end point in bh
 * @meta: true if bh points to metadata (rather than data)
 * @btotal: place to keep count of total blocks freed
 *
 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
 * free, and free them all. However, we do it one rgrp at a time. If this
 * block has references to multiple rgrps, we break it into individual
 * transactions. This allows other processes to use the rgrps while we're
 * focused on a single one, for better concurrency / performance.
 * At every transaction boundary, we rewrite the inode into the journal.
 * That way the bitmaps are kept consistent with the inode and we can recover
 * if we're interrupted by power-outages.
 *
 * Returns: 0, or return code if an error occurred.
 *          *btotal has the total number of blocks freed
 */
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
			      struct buffer_head *bh, __be64 *start, __be64 *end,
			      bool meta, u32 *btotal)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr;

	int blks_outside_rgrp;
	u64 bn, bstart, isize_blks;
	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */

	bool buf_in_tr = false; /* buffer was added to transaction */

	if (gfs2_holder_initialized(rd_gh)) {
		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
		gfs2_assert_withdraw(sdp,
			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));

	blks_outside_rgrp = 0;

	for (p = start; p < end; p++) {

		bn = be64_to_cpu(*p);

			if (!rgrp_contains_block(rgd, bn)) {
				blks_outside_rgrp++;

			rgd = gfs2_blk2rgrpd(sdp, bn, true);
			if (unlikely(!rgd)) {

			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						 0, rd_gh);

			/* Must be done with the rgrp glock held: */
			if (gfs2_rs_active(&ip->i_res) &&
			    rgd == ip->i_res.rs_rbm.rgd)
				gfs2_rs_deltree(&ip->i_res);

		/* The size of our transactions will be unknown until we
		   actually process all the metadata blocks that relate to
		   the rgrp. So we estimate. We know it can't be more than
		   the dinode's i_blocks and we don't want to exceed the
		   journal flush threshold, sd_log_thresh2. */
		if (current->journal_info == NULL) {
			unsigned int jblocks_rqsted, revokes;

			jblocks_rqsted = rgd->rd_length + RES_DINODE +
				RES_INDIRECT;
			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
				isize_blks =
					atomic_read(&sdp->sd_log_thresh2);

			jblocks_rqsted += isize_blks;
			revokes = jblocks_rqsted;
			if (meta)
				revokes += end - start;
			else if (ip->i_depth)
				revokes += sdp->sd_inptrs;
			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);

			down_write(&ip->i_rw_mutex);

		/* check if we will exceed the transaction blocks requested */
		tr = current->journal_info;
		if (tr->tr_num_buf_new + RES_STATFS +
		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
			/* We set blks_outside_rgrp to ensure the loop will
			   be repeated for the same rgrp, but with a new
			   transaction. */
			blks_outside_rgrp++;
			/* This next part is tricky. If the buffer was added
			   to the transaction, we've already set some block
			   pointers to 0, so we better follow through and free
			   them, or we will introduce corruption (so break).
			   This may be impossible, or at least rare, but I
			   decided to cover the case regardless.

			   If the buffer was not added to the transaction
			   (this call), doing so would exceed our transaction
			   size, so we need to end the transaction and start a
			   new one (so goto). */

			gfs2_trans_add_meta(ip->i_gl, bh);

		if (bstart + blen == bn) {

			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);

			gfs2_add_inode_blocks(&ip->i_inode, -blen);

		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);

		gfs2_add_inode_blocks(&ip->i_inode, -blen);

	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
					    outside the rgrp we just processed,
					    do it all over again. */
		if (current->journal_info) {
			struct buffer_head *dibh;

			ret = gfs2_meta_inode_buffer(ip, &dibh);

			/* Every transaction boundary, we rewrite the dinode
			   to keep its di_blocks current in case of failure. */
			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
				current_time(&ip->i_inode);
			gfs2_trans_add_meta(ip->i_gl, dibh);
			gfs2_dinode_out(ip, dibh->b_data);

			up_write(&ip->i_rw_mutex);
			gfs2_trans_end(sdp);
		}
		gfs2_glock_dq_uninit(rd_gh);
static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
{
	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
		return false;
	return true;
}
/**
 * find_nonnull_ptr - find a non-null pointer given a metapath and height
 * @mp: starting metapath
 * @h: desired height to search
 *
 * Assumes the metapath is valid (with buffers) out to height h.
 * Returns: true if a non-null pointer was found in the metapath buffer
 *          false if all remaining pointers are NULL in the buffer
 */
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
			     unsigned int h,
			     __u16 *end_list, unsigned int end_aligned)
{
	struct buffer_head *bh = mp->mp_bh[h];
	__be64 *first, *ptr, *end;

	first = metaptr1(h, mp);
	ptr = first + mp->mp_list[h];
	end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
		bool keep_end = h < end_aligned;
		end = first + end_list[h] + keep_end;
	}

	if (*ptr) { /* if we have a non-null pointer */
		mp->mp_list[h] = ptr - first;
		h++;
		if (h < GFS2_MAX_META_HEIGHT)
			mp->mp_list[h] = 0;
		return true;
	}
enum dealloc_states {
	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
	DEALLOC_FILL_MP = 2,    /* Fill in the metapath to the given height. */
	DEALLOC_DONE = 3,       /* process complete */
};
static inline void
metapointer_range(struct metapath *mp, int height,
		  __u16 *start_list, unsigned int start_aligned,
		  __u16 *end_list, unsigned int end_aligned,
		  __be64 **start, __be64 **end)
{
	struct buffer_head *bh = mp->mp_bh[height];
	__be64 *first;

	first = metaptr1(height, mp);
	*start = first;
	if (mp_eq_to_hgt(mp, start_list, height)) {
		bool keep_start = height < start_aligned;
		*start = first + start_list[height] + keep_start;
	}
	*end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
		bool keep_end = height < end_aligned;
		*end = first + end_list[height] + keep_end;
	}
}
static inline bool walk_done(struct gfs2_sbd *sdp,
			     struct metapath *mp, int height,
			     __u16 *end_list, unsigned int end_aligned)
{
	__u16 end;

	if (end_list) {
		bool keep_end = height < end_aligned;
		if (!mp_eq_to_hgt(mp, end_list, height))
			return false;
		end = end_list[height] + keep_end;
	} else
		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
	return mp->mp_list[height] >= end;
}
/**
 * punch_hole - deallocate blocks in a file
 * @ip: inode to truncate
 * @offset: the start of the hole
 * @length: the size of the hole (or 0 for truncate)
 *
 * Punch a hole into a file or truncate a file at a given position. This
 * function operates in whole blocks (@offset and @length are rounded
 * accordingly); partially filled blocks must be cleared otherwise.
 *
 * This function works from the bottom up, and from the right to the left. In
 * other words, it strips off the highest layer (data) before stripping any of
 * the metadata. Doing it this way is best in case the operation is interrupted
 * by power failure, etc. The dinode is rewritten in every transaction to
 * guarantee integrity.
 */
static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 maxsize = sdp->sd_heightsize[ip->i_height];
	struct metapath mp = {};
	struct buffer_head *dibh, *bh;
	struct gfs2_holder rd_gh;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
	__u16 start_list[GFS2_MAX_META_HEIGHT];
	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
	unsigned int start_aligned, uninitialized_var(end_aligned);
	unsigned int strip_h = ip->i_height - 1;
	u32 btotal = 0;
	int ret, state;
	int mp_h; /* metapath buffers are read in to this height */
	u64 prev_bnr = 0;
	__be64 *start, *end;
	if (offset >= maxsize) {
		/*
		 * The starting point lies beyond the allocated metadata;
		 * there are no blocks to deallocate.
		 */
		return 0;
	}

	/*
	 * The start position of the hole is defined by lblock, start_list, and
	 * start_aligned. The end position of the hole is defined by lend,
	 * end_list, and end_aligned.
	 *
	 * start_aligned and end_aligned define down to which height the start
	 * and end positions are aligned to the metadata tree (i.e., the
	 * position is a multiple of the metadata granularity at the height
	 * above). This determines at which heights additional meta pointers
	 * need to be preserved for the remaining data.
	 */
	if (length) {
		u64 end_offset = offset + length;
		u64 lend;

		/*
		 * Clip the end at the maximum file size for the given height:
		 * that's how far the metadata goes; files bigger than that
		 * will have additional layers of indirection.
		 */
		if (end_offset > maxsize)
			end_offset = maxsize;
		lend = end_offset >> bsize_shift;

		find_metapath(sdp, lend, &mp, ip->i_height);
		end_list = __end_list;
		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));

		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
			if (end_list[mp_h])
				break;
		}
		end_aligned = mp_h;
	}

	find_metapath(sdp, lblock, &mp, ip->i_height);
	memcpy(start_list, mp.mp_list, sizeof(start_list));

	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
		if (start_list[mp_h])
			break;
	}
	start_aligned = mp_h;

	ret = gfs2_meta_inode_buffer(ip, &dibh);

	mp.mp_bh[0] = dibh;
	ret = lookup_metapath(ip, &mp);

	/* issue read-ahead on metadata */
	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
		metapointer_range(&mp, mp_h, start_list, start_aligned,
				  end_list, end_aligned, &start, &end);
		gfs2_metapath_ra(ip->i_gl, start, end);
	}

	if (mp.mp_aheight == ip->i_height)
		state = DEALLOC_MP_FULL; /* We have a complete metapath */
	else
		state = DEALLOC_FILL_MP; /* deal with partial metapath */

	ret = gfs2_rindex_update(sdp);

	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);

	gfs2_holder_mark_uninitialized(&rd_gh);
	while (state != DEALLOC_DONE) {
		switch (state) {
		/* Truncate a full metapath at the given strip height.
		 * Note that strip_h == mp_h in order to be in this state. */
		case DEALLOC_MP_FULL:
			bh = mp.mp_bh[mp_h];
			gfs2_assert_withdraw(sdp, bh);
			if (gfs2_assert_withdraw(sdp,
						 prev_bnr != bh->b_blocknr)) {
				printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
				       "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
				       sdp->sd_fsname,
				       (unsigned long long)ip->i_no_addr,
				       prev_bnr, ip->i_height, strip_h, mp_h);
			}
			prev_bnr = bh->b_blocknr;

			if (gfs2_metatype_check(sdp, bh,
						(mp_h ? GFS2_METATYPE_IN :
							GFS2_METATYPE_DI))) {
				ret = -EIO;
				goto out;
			}

			/*
			 * Below, passing end_aligned as 0 gives us the
			 * metapointer range excluding the end point: the end
			 * point is the first metapath we must not deallocate!
			 */

			metapointer_range(&mp, mp_h, start_list, start_aligned,
					  end_list, 0 /* end_aligned */,
					  &start, &end);
			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
						 start, end,
						 mp_h != ip->i_height - 1,
						 &btotal);

			/* If we hit an error or just swept dinode buffer,
			   just quit. */
			if (ret || !mp_h) {
				state = DEALLOC_DONE;
				break;
			}
			state = DEALLOC_MP_LOWER;
			break;

		/* lower the metapath strip height */
		case DEALLOC_MP_LOWER:
			/* We're done with the current buffer, so release it,
			   unless it's the dinode buffer. Then back up to the
			   previous pointer. */
			if (mp_h) {
				brelse(mp.mp_bh[mp_h]);
				mp.mp_bh[mp_h] = NULL;
			}
			/* If we can't get any lower in height, we've stripped
			   off all we can. Next step is to back up and start
			   stripping the previous level of metadata. */
			if (mp_h == 0) {
				strip_h--;
				memcpy(mp.mp_list, start_list, sizeof(start_list));
				mp_h = strip_h;
				state = DEALLOC_FILL_MP;
				break;
			}
			mp.mp_list[mp_h] = 0;
			mp_h--; /* search one metadata height down */

			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
				break;
			/* Here we've found a part of the metapath that is not
			 * allocated. We need to search at that height for the
			 * next non-null pointer. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
				state = DEALLOC_FILL_MP;
				mp_h++;
			}
			/* No more non-null pointers at this height. Back up
			   to the previous height and try again. */
			break; /* loop around in the same state */

		/* Fill the metapath with buffers to the given height. */
		case DEALLOC_FILL_MP:
			/* Fill the buffers out to the current height. */
			ret = fillup_metapath(ip, &mp, mp_h);
			if (ret < 0)
				goto out;

			/* On the first pass, issue read-ahead on metadata. */
			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
				unsigned int height = mp.mp_aheight - 1;

				/* No read-ahead for data blocks. */
				if (mp.mp_aheight - 1 == strip_h)
					height--;

				for (; height >= mp.mp_aheight - ret; height--) {
					metapointer_range(&mp, height,
							  start_list, start_aligned,
							  end_list, end_aligned,
							  &start, &end);
					gfs2_metapath_ra(ip->i_gl, start, end);
				}
			}

			/* If buffers found for the entire strip height */
			if (mp.mp_aheight - 1 == strip_h) {
				state = DEALLOC_MP_FULL;
				break;
			}
			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
				mp_h = mp.mp_aheight - 1;

			/* If we find a non-null block pointer, crawl a bit
			   higher up in the metapath and try again, otherwise
			   we need to look lower for a new starting point. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
				mp_h++;
			else
				state = DEALLOC_MP_LOWER;
	if (btotal) {
		if (current->journal_info == NULL) {
			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
					       RES_QUOTA, 0);

			down_write(&ip->i_rw_mutex);
		}
		gfs2_statfs_change(sdp, 0, +btotal, 0);
		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);
		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
	}

out:
	if (gfs2_holder_initialized(&rd_gh))
		gfs2_glock_dq_uninit(&rd_gh);
	if (current->journal_info) {
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
	}
	gfs2_quota_unhold(ip);
out_metapath:
	release_metapath(&mp);
	return ret;
}
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
/**
 * do_shrink - make a file smaller
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * Called with an exclusive lock on @inode. The @newsize must
 * be equal to or smaller than the current inode size.
 *
 * Returns: errno
 */

static int do_shrink(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = trunc_start(inode, newsize);

	if (gfs2_is_stuffed(ip))
		return 0;

	error = punch_hole(ip, newsize, 0);
	if (error == 0)
		error = trunc_end(ip);

	return error;
}
void gfs2_trim_blocks(struct inode *inode)
{
	ret = do_shrink(inode, inode->i_size);
/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and
 * may also increase the size of the inode. This function
 * must not be called with @size any smaller than the current
 * inode size.
 *
 * Although it is not strictly required to unstuff files here,
 * earlier versions of GFS2 have a bug in the stuffed file reading
 * code which will result in a buffer overrun if the size is larger
 * than the max stuffed file size. In order to prevent this from
 * occurring, such files are unstuffed, but in other cases we can
 * just update the inode size directly.
 *
 * Returns: 0 on success, or -ve on error
 */
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (unstuff &&
				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);

	gfs2_trans_end(sdp);
do_grow_release:
	gfs2_inplace_release(ip);
do_grow_qunlock:
	gfs2_quota_unlock(ip);
/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_rwsem and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */

int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);

	inode_dio_wait(inode);

	ret = gfs2_rsqa_alloc(ip);

	if (newsize >= inode->i_size) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	ret = do_shrink(inode, newsize);
out:
	gfs2_rsqa_delete(ip, NULL);
	return ret;
}
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;

	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
	if (error == 0)
		error = trunc_end(ip);
	return error;
}

int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return punch_hole(ip, 0, 0);
}
/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 */

void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while(!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}
/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */

static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}
/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal. This will save
 * us time when writing journal blocks. Most journals will have only one
 * extent that maps all their logical blocks. That's because mkfs.gfs2
 * arranges the journal blocks sequentially to maximize performance.
 * So the extent would map the first block for the entire file length.
 * However, gfs2_jadd can happen while file activity is happening, so
 * those journals may not be sequential. Less likely is the case where
 * the users created their own journals by mounting the metafs and
 * laying it out. But it's still possible. These journals might have
 * several extents.
 *
 * Returns: 0 on success, or error on failure
 */
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;
	ktime_t start, end;

	start = ktime_get();
	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;

	WARN_ON(!list_empty(&jd->extent_list));

	do {
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);

		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	end = ktime_get();
	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
		jd->nr_extents, ktime_ms_delta(end, start));
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */

int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len > gfs2_max_stuffed_size(ip))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
		return 1;

	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}
static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;

	if (offset >= inode->i_size)
		return 0;
	if (offset + length > inode->i_size)
		length = inode->i_size - offset;

	error = gfs2_meta_inode_buffer(ip, &dibh);

	gfs2_trans_add_meta(ip->i_gl, dibh);
	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
	       length);
	brelse(dibh);
	return 0;
}
static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
					 loff_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	int error;

	while (length) {
		struct gfs2_trans *tr;
		loff_t chunk;
		unsigned int offs;

		chunk = length;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = offset & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);
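
		/*
		 * Same chunking trick as in gfs2_journaled_truncate() above:
		 * all but the first truncate point stay page aligned.
		 */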
		truncate_pagecache_range(inode, offset, chunk);
		offset += chunk;
		length -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (gfs2_is_jdata(ip))
		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
					 GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_zero_range(inode, offset, length);
	} else {
		unsigned int start_off, end_len, blocksize;

		blocksize = i_blocksize(inode);
		start_off = offset & (blocksize - 1);
		end_len = (offset + length) & (blocksize - 1);
		if (start_off) {
			unsigned int len = length;
			if (length > blocksize - start_off)
				len = blocksize - start_off;
			error = gfs2_block_zero_range(inode, offset, len);

			if (start_off + length < blocksize)
				end_len = 0;
		}
		if (end_len) {
			error = gfs2_block_zero_range(inode,
				offset + length - end_len, end_len);
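
		/*
		 * Worked example for the zeroing above (illustrative
		 * numbers): with a 4k block size, offset = 1000 and
		 * length = 10000 zero the partial ranges [1000, 4096)
		 * and [8192, 11000); the fully covered block
		 * [4096, 8192) is left to punch_hole() below.
		 */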
	if (gfs2_is_jdata(ip)) {
		BUG_ON(!current->journal_info);
		gfs2_journaled_truncate_range(inode, offset, length);
	} else
		truncate_pagecache_range(inode, offset, offset + length - 1);

	file_update_time(file);
	mark_inode_dirty(inode);

	if (current->journal_info)
		gfs2_trans_end(sdp);

	if (!gfs2_is_stuffed(ip))
		error = punch_hole(ip, offset, length);

out:
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}