// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
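
/*
 * UFS allocates space in whole blocks of uspi->s_fpb fragments, but
 * the tail of a file may occupy only the first few fragments of a
 * block.  Every cylinder group therefore carries a free-fragment
 * bitmap together with summary counters (cs_nbfree whole free
 * blocks, cs_nffree free fragments, and cg_frsum[], the number of
 * free fragment runs of each length), and the routines below keep
 * all of them consistent.
 */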
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, bit, end_bit, bbase, blkmap, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");

        mutex_lock(&UFS_SB(sb)->s_lock);

        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
                goto failed;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
                goto failed;
        }

        end_bit = bit + count;
        bbase = ufs_blknum (bit);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
                else
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }

        inode_sub_bytes(inode, count << uspi->s_fshift);
        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

        /*
         * Try to reassemble the free fragments into a whole block
         */
        blkno = ufs_fragstoblks (bbase);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                uspi->cs_total.cs_nffree -= uspi->s_fpb;
                fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno (bbase);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(bbase)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;

failed:
        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned overflow, cgno, bit, end_bit, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
                ufs_error (sb, "ufs_free_blocks", "internal error, "
                           "fragment %llu, count %u\n",
                           (unsigned long long)fragment, count);
                goto failed;
        }

        mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
        overflow = 0;
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
                goto failed_unlock;
        }
        end_bit = bit + count;
        if (end_bit > uspi->s_fpg) {
                overflow = bit + count - uspi->s_fpg;
                count -= overflow;
                end_bit -= overflow;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed_unlock;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
                goto failed_unlock;
        }

        for (i = bit; i < end_bit; i += uspi->s_fpb) {
                blkno = ufs_fragstoblks(i);
                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);

                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno(i);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(i)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));

        if (overflow) {
                fragment += count;
                count = overflow;
                goto do_more;
        }

        ufs_mark_sb_dirty(sb);
        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;

failed_unlock:
        mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
        UFSD("EXIT (FAILED)\n");
        return;
}
/*
 * Modify the inode page cache so that blocks which currently have
 * b_blocknr == oldb...oldb+count-1 end up with
 * b_blocknr == newb...newb+count-1.  We also assume that the
 * oldb...oldb+count-1 blocks are situated at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already
 * locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               unsigned int count, sector_t oldb,
                               sector_t newb, struct page *locked_page)
{
        struct folio *folio, *locked_folio = page_folio(locked_page);
        const unsigned blks_per_page =
                1 << (PAGE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
        unsigned pos, j, lblock;
        sector_t end, i;
        struct buffer_head *head, *bh;

        UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
             inode->i_ino, count,
             (unsigned long long)oldb, (unsigned long long)newb);

        BUG_ON(!folio_test_locked(locked_folio));

        cur_index = locked_folio->index;
        end = count + beg;
        last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
        for (i = beg; i < end; i = (i | mask) + 1) {
                index = i >> (PAGE_SHIFT - inode->i_blkbits);

                if (likely(cur_index != index)) {
                        folio = ufs_get_locked_folio(mapping, index);
                        if (!folio) /* it was truncated */
                                continue;
                        if (IS_ERR(folio)) {/* or EIO */
                                ufs_error(inode->i_sb, __func__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
                        }
                } else
                        folio = locked_folio;

                head = folio_buffers(folio);
                bh = head;
                pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;

                if (unlikely(index == last_index))
                        lblock = end & mask;
                else
                        lblock = blks_per_page;

                do {
                        if (j >= lblock)
                                break;
                        pos = (i - beg) + j;

                        if (!buffer_mapped(bh))
                                map_bh(bh, inode->i_sb, oldb + pos);
                        if (bh_read(bh, 0) < 0) {
                                ufs_error(inode->i_sb, __func__,
                                          "read of block failed\n");
                                break;
                        }

                        UFSD(" change from %llu to %llu, pos %u\n",
                             (unsigned long long)(pos + oldb),
                             (unsigned long long)(pos + newb), pos);

                        bh->b_blocknr = newb + pos;
                        clean_bdev_bh_alias(bh);
                        mark_buffer_dirty(bh);
                        ++j;
                        bh = bh->b_this_page;
                } while (bh != head);

                if (likely(cur_index != index))
                        ufs_put_locked_folio(folio);
        }
        UFSD("EXIT\n");
}
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
        struct buffer_head *bh;
        sector_t end = beg + n;

        for (; beg < end; ++beg) {
                bh = sb_getblk(inode->i_sb, beg);
                lock_buffer(bh);
                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                if (IS_SYNC(inode) || sync)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
}
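
/*
 * ufs_new_fragments() below handles three cases: a brand-new
 * allocation (oldcount == 0), extending the existing tail run of
 * fragments in place via ufs_add_fragments(), and, when in-place
 * extension fails, allocating a fresh run elsewhere and migrating
 * the old buffers to it with ufs_change_blocknr().
 */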
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                      u64 goal, unsigned count, int *err,
                      struct page *locked_page)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        unsigned cgno, oldcount, newcount;
        u64 tmp, request, result;

        UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;

        mutex_lock(&UFS_SB(sb)->s_lock);
        tmp = ufs_data_ptr_to_cpu(sb, p);

        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
                ufs_warning(sb, "ufs_new_fragments", "internal warning"
                            " fragment %llu, count %u",
                            (unsigned long long)fragment, count);
                count = uspi->s_fpb - ufs_fragnum(fragment);
        }
        oldcount = ufs_fragnum (fragment);
        newcount = oldcount + count;

        /*
         * Somebody else has just allocated our fragments
         */
        if (oldcount) {
                if (!tmp) {
                        ufs_error(sb, "ufs_new_fragments", "internal error, "
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        } else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }

        /*
         * There is not enough space for the user on the device
         */
        if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
                if (!capable(CAP_SYS_RESOURCE)) {
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        UFSD("EXIT (FAILED)\n");
                        return 0;
                }
        }

        if (goal >= uspi->s_size)
                goal = 0;
        if (goal == 0)
                cgno = ufs_inotocg (inode->i_ino);
        else
                cgno = ufs_dtog(uspi, goal);

        /*
         * allocate new fragment
         */
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                        *err = 0;
                        write_seqlock(&UFS_I(inode)->meta_lock);
                        ufs_cpu_to_data_ptr(sb, p, result);
                        UFS_I(inode)->i_lastfrag =
                                max(UFS_I(inode)->i_lastfrag, fragment + count);
                        write_sequnlock(&UFS_I(inode)->meta_lock);
                }
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * resize block
         */
        result = ufs_add_fragments(inode, tmp, oldcount, newcount);
        if (result) {
                *err = 0;
                read_seqlock_excl(&UFS_I(inode)->meta_lock);
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                               fragment + count);
                read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * allocate new block and move data
         */
        if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
                request = newcount;
                if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
                        usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
        } else {
                request = uspi->s_fpb;
                if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
                        usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                mutex_unlock(&UFS_SB(sb)->s_lock);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
                *err = 0;
                write_seqlock(&UFS_I(inode)->meta_lock);
                ufs_cpu_to_data_ptr(sb, p, result);
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                               fragment + count);
                write_sequnlock(&UFS_I(inode)->meta_lock);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return 0;
}
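
/*
 * Charge 'frags' new fragments to the inode, but only if the
 * resulting i_blocks still fits in 32 bits; otherwise the charge is
 * rolled back and the allocation refused.
 */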
static bool try_add_frags(struct inode *inode, unsigned frags)
{
        unsigned size = frags * i_blocksize(inode);
        spin_lock(&inode->i_lock);
        __inode_add_bytes(inode, size);
        if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
                __inode_sub_bytes(inode, size);
                spin_unlock(&inode->i_lock);
                return false;
        }
        spin_unlock(&inode->i_lock);
        return true;
}
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, fragno, fragoff, count, fragsize, i;

        UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
             (unsigned long long)fragment, oldcount, newcount);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        count = newcount - oldcount;

        cgno = ufs_dtog(uspi, fragment);
        if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
                return 0;
        if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
                return 0;
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error, bad magic number on cg %u", cgno);
                return 0;
        }

        fragno = ufs_dtogd(uspi, fragment);
        fragoff = ufs_fragnum (fragno);
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;

        if (!try_add_frags(inode, count))
                return 0;
        /*
         * Block can be extended
         */
        ucg->cg_time = ufs_get_seconds(sb);
        for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        break;
        fragsize = i - oldcount;
        if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error or corrupted bitmap on cg %u", cgno);
        fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
        if (fragsize != count)
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

        return fragment;
}
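
/*
 * UFS_TEST_FREE_SPACE_CG expects sb, uspi, cgno, count and k to be
 * in scope and jumps to a local cg_found label as soon as cylinder
 * group 'cgno' has either a whole free block or a run of at least
 * 'count' free fragments.
 */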
#define UFS_TEST_FREE_SPACE_CG \
        ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
        if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
                goto cg_found; \
        for (k = count; k < uspi->s_fpb; k++) \
                if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
                        goto cg_found;
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned oldcg, i, j, k, allocsize;
        u64 result;

        UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
             inode->i_ino, cgno, (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        oldcg = cgno;

        /*
         * 1. search the preferred cylinder group
         */
        UFS_TEST_FREE_SPACE_CG

        /*
         * 2. quadratic rehash
         */
        for (j = 1; j < uspi->s_ncg; j *= 2) {
                cgno += j;
                if (cgno >= uspi->s_ncg)
                        cgno -= uspi->s_ncg;
                UFS_TEST_FREE_SPACE_CG
        }

        /*
         * 3. brute force search
         * We start at i = 2 (0 is checked in step 1, 1 in step 2)
         */
        cgno = (oldcg + 1) % uspi->s_ncg;
        for (j = 2; j < uspi->s_ncg; j++) {
                cgno++;
                if (cgno >= uspi->s_ncg)
                        cgno = 0;
                UFS_TEST_FREE_SPACE_CG
        }

        UFSD("EXIT (FAILED)\n");
        return 0;

cg_found:
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg))
                ufs_panic (sb, "ufs_alloc_fragments",
                        "internal error, bad magic number on cg %u", cgno);
        ucg->cg_time = ufs_get_seconds(sb);

        if (count == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goto succed;
        }

        for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
                if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
                        break;

        if (allocsize == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goal = ufs_dtogd(uspi, result);
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;

                inode_sub_bytes(inode, i << uspi->s_fshift);
                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
                fs32_add(sb, &ucg->cg_frsum[i], 1);
                goto succed;
        }

        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
        if (!try_add_frags(inode, count))
                return 0;
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

        if (count != allocsize)
                fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        result += cgno * uspi->s_fpg;
        UFSD("EXIT3, result %llu\n", (unsigned long long)result);
        return result;
}
static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cylinder_group * ucg;
        u64 result, blkno;

        UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal == 0) {
                goal = ucpi->c_rotor;
                goto norot;
        }
        goal = ufs_blknum (goal);
        goal = ufs_dtogd(uspi, goal);

        /*
         * If the requested block is available, use it.
         */
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
                result = goal;
                goto gotit;
        }

norot:
        result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
        if (result == INVBLOCK)
                return INVBLOCK;
        ucpi->c_rotor = result;
gotit:
        if (!try_add_frags(inode, uspi->s_fpb))
                return 0;
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);

        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree--;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

        if (uspi->fs_magic != UFS2_MAGIC) {
                unsigned cylno = ufs_cbtocylno((unsigned)result);

                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                          ufs_cbtorpos((unsigned)result)), 1);
                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }

        UFSD("EXIT, result %llu\n", (unsigned long long)result);

        return result;
}
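
/*
 * Scan 'size' bytes of the free-fragment bitmap starting at byte
 * 'begin', stopping at the first byte whose table[] entry has a bit
 * of 'mask' set.  Returns the number of bytes left unscanned, so a
 * return value of 0 means nothing was found.
 */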
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
        unsigned rest, offset;
        unsigned char *cp;

        offset = begin & ~uspi->s_fmask;
        begin >>= uspi->s_fshift;
        for (;;) {
                if ((offset + size) < uspi->s_fsize)
                        rest = size;
                else
                        rest = uspi->s_fsize - offset;
                size -= rest;
                cp = ubh->bh[begin]->b_data + offset;
                while ((table[*cp++] & mask) == 0 && --rest)
                        ;
                if (rest || !size)
                        break;
                begin++;
                offset = 0;
        }
        return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;

        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }

        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * Found the byte in the map; now locate the fragment run
         * inside it.
         */
        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}
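
/*
 * Update the cluster bitmap and the cluster summary counts of the
 * cylinder group when block 'blkno' changes state: cnt > 0 marks it
 * free, cnt < 0 marks it allocated.  The neighbouring free runs are
 * re-accounted under the merged (or re-split) cluster length.
 */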
static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;

        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /* Find the size of the cluster going forward. */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if (end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;
        forw = i - start;

        /* Find the size of the cluster going backward. */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0)
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if (i < end)
                i = end;
        back = start - i;

        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
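
/*
 * The fragment tables map a byte of the free-fragment bitmap to a
 * bitmask describing the lengths of the free runs inside that byte;
 * ubh_scanc() indexes them with the bitmap byte and tests the bit
 * selected by 1 << (count - 1 + (s_fpb & 7)), so ufs_fragtable_8fpb
 * serves the 8-fragments-per-block layout and ufs_fragtable_other
 * the remaining layouts.
 */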
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};