/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
/* Sentinel returned by the allocators below when no block can be found */
#define INVBLOCK ((u64)-1L)
static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
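/*
 * Worked example (illustrative, not from the original source): with
 * s_fpb == 8, suppose fragments 0-5 of a block are already free and we
 * free fragments 6 and 7. After the loop sets their bitmap bits,
 * ubh_isblockset() sees the whole block free, so the code above takes
 * s_fpb fragments back out of cs_nffree and credits one full block to
 * cs_nbfree instead.
 */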
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
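/*
 * Note on the overflow handling above: a range that crosses a cylinder
 * group boundary is freed in two passes. The first pass trims 'count'
 * down to the end of the current group, and the leftover ('overflow')
 * is fed back through the 'do_more' loop for the next group.
 */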
/*
 * Modify the inode page cache so that:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get  - blocks with b_blocknr equal to newb...newb+count-1
 * We also assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)	/* it was truncated */
				continue;
			if (IS_ERR(page)) {	/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
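/*
 * Example of the remapping above (illustrative): moving 4 fragments
 * from oldb = 100 to newb = 200 walks every page covering file blocks
 * beg...beg+count-1, locates the buffer_heads for those blocks, reads
 * them in if needed, and rewrites b_blocknr from 100+pos to 200+pos
 * before marking them dirty, so writeback lands at the new location.
 */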
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
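/*
 * Why the zeroing above matters: a freshly allocated fragment may still
 * hold whatever data its previous owner left on disk, so each new
 * buffer is zero-filled and marked dirty before it can be read back
 * through the page cache; for sync inodes the zeroes are pushed out
 * immediately.
 */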
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			write_sequnlock(&UFS_I(inode)->meta_lock);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		/* fragmentation has grown close to the free reserve:
		 * switch back to optimizing for space */
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
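/*
 * Summary of the three allocation paths above:
 *   1) oldcount == 0: allocate a brand new run of fragments;
 *   2) the existing tail can be extended in place (ufs_add_fragments);
 *   3) otherwise allocate a new (possibly full-block) run elsewhere,
 *      move the cached data with ufs_change_blocknr(), then free both
 *      the old fragments and any over-allocation beyond newcount.
 */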
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
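/*
 * frsum bookkeeping example (illustrative): extending a 2-fragment tail
 * to 3 fragments, with 5 free fragments following the tail, shrinks a
 * free run counted under cg_frsum[5] to one of length 4, so frsum[5] is
 * decremented and frsum[5 - 1] incremented, matching the
 * fragsize/count arithmetic above.
 */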
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
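/*
 * Assumptions baked into UFS_TEST_FREE_SPACE_CG: the expansion refers
 * to sb, uspi, cgno, count, k and ucg from the enclosing scope and
 * jumps to a local 'cg_found' label, so it is only usable inside
 * ufs_alloc_fragments() below.
 */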
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (the preferred group was tested in step 1,
	 * the group at offset 1 by the first probe of step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
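/*
 * Search order example (illustrative, s_ncg == 8, preferred group 3):
 * step 1 tests group 3 itself; step 2 probes groups 4, 6 and 2
 * (cumulative offsets 1, 2, 4 mod s_ncg); step 3 then sweeps the
 * remaining groups linearly, so every cylinder group is examined
 * before the allocation gives up.
 */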
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
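/*
 * The c_rotor above is a per-cylinder-group allocation cursor: when no
 * goal is given, the bitmap search resumes where the last block
 * allocation in this group left off, which spreads allocations out
 * instead of rescanning from the start of the group every time.
 */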
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
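/*
 * Return value convention for ubh_scanc(): it scans at most 'size'
 * bytes of the bitmap through 'table'/'mask' and returns how many bytes
 * were still unscanned when a matching byte was found, so 0 means no
 * byte matched and the caller converts (start + length - loc) back
 * into the bit offset of the match.
 */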
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
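/*
 * mask_arr/want_arr example (illustrative, count == 2): after
 * 'blockmap <<= 1' the window test ((map & 0x7) == 0x6) looks for two
 * free bits (set) immediately preceded by an in-use bit or the block
 * boundary (the shifted-in zero); shifting mask and want left slides
 * that window across the block's fragment map until the start of a
 * suitable run is found.
 */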
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit(UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit(UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
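/*
 * Cluster summary example (illustrative): freeing a block with 2 free
 * neighbours in front and 1 behind merges three clusters into one of
 * length 4 (capped at s_contigsumsize), so the code above adds cnt to
 * the length-4 summary counter and removes the old forward (2) and
 * backward (1) cluster counts.
 */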
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
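/*
 * Fragment table semantics (illustrative reading, inferred from the
 * scan mask built in ufs_bitmap_search()): each table maps one byte of
 * the on-disk free-fragment bitmap (set bit = free fragment) to a
 * summary of the free runs it contains. For ufs_fragtable_8fpb, bit
 * (n - 1) of the result is set when the byte holds a free run of
 * length n; e.g. index 0xFF yields 0x80 (one run of 8) and index 0x0F
 * yields 0x08 (one run of 4). ufs_fragtable_other packs several blocks
 * per byte for s_fpb < 8, which is why the scan mask there is built as
 * 1 << (count - 1 + (uspi->s_fpb & 7)).
 */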