// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <[email protected]>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <[email protected]>, 2007
 */
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

54fb996a
ED
27#define INVBLOCK ((u64)-1L)
28
45641c82 29static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
54fb996a
ED
30static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
31static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
32static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
1da177e4
LT
33static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
34static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
35
36/*
37 * Free 'count' fragments from fragment number 'fragment'
38 */
54fb996a 39void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
6ef4d6bf 40{
1da177e4
LT
41 struct super_block * sb;
42 struct ufs_sb_private_info * uspi;
1da177e4
LT
43 struct ufs_cg_private_info * ucpi;
44 struct ufs_cylinder_group * ucg;
54fb996a
ED
45 unsigned cgno, bit, end_bit, bbase, blkmap, i;
46 u64 blkno;
1da177e4
LT
47
48 sb = inode->i_sb;
49 uspi = UFS_SB(sb)->s_uspi;
1da177e4 50
54fb996a
ED
51 UFSD("ENTER, fragment %llu, count %u\n",
52 (unsigned long long)fragment, count);
1da177e4
LT
53
54 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
55 ufs_error (sb, "ufs_free_fragments", "internal error");
cdd9eefd
FF
56
57 mutex_lock(&UFS_SB(sb)->s_lock);
1da177e4 58
54fb996a
ED
59 cgno = ufs_dtog(uspi, fragment);
60 bit = ufs_dtogd(uspi, fragment);
1da177e4
LT
61 if (cgno >= uspi->s_ncg) {
62 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
63 goto failed;
64 }
65
66 ucpi = ufs_load_cylinder (sb, cgno);
67 if (!ucpi)
68 goto failed;
9695ef16 69 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
70 if (!ufs_cg_chkmagic(sb, ucg)) {
71 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
72 goto failed;
73 }
74
75 end_bit = bit + count;
76 bbase = ufs_blknum (bit);
9695ef16 77 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
1da177e4
LT
78 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
79 for (i = bit; i < end_bit; i++) {
9695ef16
ED
80 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
81 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
7b4ee73e
E
82 else
83 ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
1da177e4 85 }
eb315d2a
AV
86
87 inode_sub_bytes(inode, count << uspi->s_fshift);
1da177e4 88 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
ee3ffd6c 89 uspi->cs_total.cs_nffree += count;
1da177e4 90 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
9695ef16 91 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
1da177e4
LT
92 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
93
94 /*
95 * Trying to reassemble free fragments into block
96 */
97 blkno = ufs_fragstoblks (bbase);
9695ef16 98 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
1da177e4 99 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
ee3ffd6c 100 uspi->cs_total.cs_nffree -= uspi->s_fpb;
1da177e4
LT
101 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
102 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
103 ufs_clusteracct (sb, ucpi, blkno, 1);
104 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
ee3ffd6c 105 uspi->cs_total.cs_nbfree++;
1da177e4 106 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
54fb996a
ED
107 if (uspi->fs_magic != UFS2_MAGIC) {
108 unsigned cylno = ufs_cbtocylno (bbase);
109
110 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
111 ufs_cbtorpos(bbase)), 1);
112 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
113 }
1da177e4
LT
114 }
115
9695ef16
ED
116 ubh_mark_buffer_dirty (USPI_UBH(uspi));
117 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
1751e8a6 118 if (sb->s_flags & SB_SYNCHRONOUS)
9cb569d6 119 ubh_sync_block(UCPI_UBH(ucpi));
9e9ad5f4 120 ufs_mark_sb_dirty(sb);
cdd9eefd
FF
121
122 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 123 UFSD("EXIT\n");
1da177e4
LT
124 return;
125
126failed:
cdd9eefd 127 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 128 UFSD("EXIT (FAILED)\n");
1da177e4
LT
129 return;
130}
131
132/*
133 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
134 */
54fb996a 135void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
6ef4d6bf 136{
1da177e4
LT
137 struct super_block * sb;
138 struct ufs_sb_private_info * uspi;
1da177e4
LT
139 struct ufs_cg_private_info * ucpi;
140 struct ufs_cylinder_group * ucg;
54fb996a
ED
141 unsigned overflow, cgno, bit, end_bit, i;
142 u64 blkno;
1da177e4
LT
143
144 sb = inode->i_sb;
145 uspi = UFS_SB(sb)->s_uspi;
1da177e4 146
54fb996a
ED
147 UFSD("ENTER, fragment %llu, count %u\n",
148 (unsigned long long)fragment, count);
1da177e4
LT
149
150 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
151 ufs_error (sb, "ufs_free_blocks", "internal error, "
54fb996a
ED
152 "fragment %llu, count %u\n",
153 (unsigned long long)fragment, count);
1da177e4
LT
154 goto failed;
155 }
156
cdd9eefd 157 mutex_lock(&UFS_SB(sb)->s_lock);
1da177e4
LT
158
159do_more:
160 overflow = 0;
54fb996a
ED
161 cgno = ufs_dtog(uspi, fragment);
162 bit = ufs_dtogd(uspi, fragment);
1da177e4
LT
163 if (cgno >= uspi->s_ncg) {
164 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
2e006393 165 goto failed_unlock;
1da177e4
LT
166 }
167 end_bit = bit + count;
168 if (end_bit > uspi->s_fpg) {
169 overflow = bit + count - uspi->s_fpg;
170 count -= overflow;
171 end_bit -= overflow;
172 }
173
174 ucpi = ufs_load_cylinder (sb, cgno);
175 if (!ucpi)
2e006393 176 goto failed_unlock;
9695ef16 177 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
178 if (!ufs_cg_chkmagic(sb, ucg)) {
179 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
2e006393 180 goto failed_unlock;
1da177e4
LT
181 }
182
183 for (i = bit; i < end_bit; i += uspi->s_fpb) {
184 blkno = ufs_fragstoblks(i);
9695ef16 185 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
1da177e4
LT
186 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
187 }
9695ef16 188 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
eb315d2a 189 inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
1da177e4
LT
190 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
191 ufs_clusteracct (sb, ucpi, blkno, 1);
1da177e4
LT
192
193 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
ee3ffd6c 194 uspi->cs_total.cs_nbfree++;
1da177e4 195 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
54fb996a
ED
196
197 if (uspi->fs_magic != UFS2_MAGIC) {
198 unsigned cylno = ufs_cbtocylno(i);
199
200 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
201 ufs_cbtorpos(i)), 1);
202 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
203 }
1da177e4
LT
204 }
205
9695ef16
ED
206 ubh_mark_buffer_dirty (USPI_UBH(uspi));
207 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
1751e8a6 208 if (sb->s_flags & SB_SYNCHRONOUS)
9cb569d6 209 ubh_sync_block(UCPI_UBH(ucpi));
1da177e4
LT
210
211 if (overflow) {
212 fragment += count;
213 count = overflow;
214 goto do_more;
215 }
216
9e9ad5f4 217 ufs_mark_sb_dirty(sb);
cdd9eefd 218 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 219 UFSD("EXIT\n");
1da177e4
LT
220 return;
221
2e006393 222failed_unlock:
cdd9eefd 223 mutex_unlock(&UFS_SB(sb)->s_lock);
2e006393 224failed:
abf5d15f 225 UFSD("EXIT (FAILED)\n");
1da177e4
LT
226 return;
227}
228
6ef4d6bf
ED
229/*
230 * Modify inode page cache in such way:
231 * have - blocks with b_blocknr equal to oldb...oldb+count-1
232 * get - blocks with b_blocknr equal to newb...newb+count-1
233 * also we suppose that oldb...oldb+count-1 blocks
234 * situated at the end of file.
235 *
236 * We can come here from ufs_writepage or ufs_prepare_write,
237 * locked_page is argument of these functions, so we already lock it.
238 */
5431bf97
ED
239static void ufs_change_blocknr(struct inode *inode, sector_t beg,
240 unsigned int count, sector_t oldb,
241 sector_t newb, struct page *locked_page)
6ef4d6bf 242{
c7e8812c 243 struct folio *folio, *locked_folio = page_folio(locked_page);
5431bf97 244 const unsigned blks_per_page =
09cbfeaf 245 1 << (PAGE_SHIFT - inode->i_blkbits);
5431bf97 246 const unsigned mask = blks_per_page - 1;
efee2b81 247 struct address_space * const mapping = inode->i_mapping;
5431bf97
ED
248 pgoff_t index, cur_index, last_index;
249 unsigned pos, j, lblock;
250 sector_t end, i;
6ef4d6bf
ED
251 struct buffer_head *head, *bh;
252
5431bf97
ED
253 UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
254 inode->i_ino, count,
255 (unsigned long long)oldb, (unsigned long long)newb);
6ef4d6bf 256
c7e8812c 257 BUG_ON(!folio_test_locked(locked_folio));
6ef4d6bf 258
c7e8812c 259 cur_index = locked_folio->index;
5431bf97 260 end = count + beg;
09cbfeaf 261 last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
5431bf97 262 for (i = beg; i < end; i = (i | mask) + 1) {
09cbfeaf 263 index = i >> (PAGE_SHIFT - inode->i_blkbits);
6ef4d6bf
ED
264
265 if (likely(cur_index != index)) {
c7e8812c
MWO
266 folio = ufs_get_locked_folio(mapping, index);
267 if (!folio) /* it was truncated */
5431bf97 268 continue;
c7e8812c 269 if (IS_ERR(folio)) {/* or EIO */
9746077a 270 ufs_error(inode->i_sb, __func__,
5431bf97
ED
271 "read of page %llu failed\n",
272 (unsigned long long)index);
6ef4d6bf 273 continue;
5431bf97 274 }
6ef4d6bf 275 } else
c7e8812c 276 folio = locked_folio;
6ef4d6bf 277
c7e8812c 278 head = folio_buffers(folio);
6ef4d6bf 279 bh = head;
5431bf97 280 pos = i & mask;
efee2b81
ED
281 for (j = 0; j < pos; ++j)
282 bh = bh->b_this_page;
5431bf97 283
5431bf97
ED
284 if (unlikely(index == last_index))
285 lblock = end & mask;
286 else
287 lblock = blks_per_page;
288
6ef4d6bf 289 do {
5431bf97
ED
290 if (j >= lblock)
291 break;
292 pos = (i - beg) + j;
293
294 if (!buffer_mapped(bh))
295 map_bh(bh, inode->i_sb, oldb + pos);
6799b698
ZY
296 if (bh_read(bh, 0) < 0) {
297 ufs_error(inode->i_sb, __func__,
298 "read of block failed\n");
299 break;
6ef4d6bf
ED
300 }
301
5431bf97 302 UFSD(" change from %llu to %llu, pos %u\n",
9df13039
AM
303 (unsigned long long)(pos + oldb),
304 (unsigned long long)(pos + newb), pos);
5431bf97
ED
305
306 bh->b_blocknr = newb + pos;
e64855c6 307 clean_bdev_bh_alias(bh);
5431bf97
ED
308 mark_buffer_dirty(bh);
309 ++j;
6ef4d6bf
ED
310 bh = bh->b_this_page;
311 } while (bh != head);
312
10e5dce0 313 if (likely(cur_index != index))
c7e8812c 314 ufs_put_locked_folio(folio);
6ef4d6bf 315 }
abf5d15f 316 UFSD("EXIT\n");
6ef4d6bf
ED
317}
318
d63b7090
ED
319static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
320 int sync)
321{
322 struct buffer_head *bh;
323 sector_t end = beg + n;
324
325 for (; beg < end; ++beg) {
326 bh = sb_getblk(inode->i_sb, beg);
327 lock_buffer(bh);
328 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
329 set_buffer_uptodate(bh);
330 mark_buffer_dirty(bh);
331 unlock_buffer(bh);
332 if (IS_SYNC(inode) || sync)
333 sync_dirty_buffer(bh);
334 brelse(bh);
335 }
336}
337
54fb996a
ED
338u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
339 u64 goal, unsigned count, int *err,
340 struct page *locked_page)
1da177e4
LT
341{
342 struct super_block * sb;
343 struct ufs_sb_private_info * uspi;
344 struct ufs_super_block_first * usb1;
54fb996a
ED
345 unsigned cgno, oldcount, newcount;
346 u64 tmp, request, result;
1da177e4 347
54fb996a
ED
348 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
349 inode->i_ino, (unsigned long long)fragment,
350 (unsigned long long)goal, count);
1da177e4
LT
351
352 sb = inode->i_sb;
353 uspi = UFS_SB(sb)->s_uspi;
7b4ee73e 354 usb1 = ubh_get_usb_first(uspi);
1da177e4
LT
355 *err = -ENOSPC;
356
cdd9eefd 357 mutex_lock(&UFS_SB(sb)->s_lock);
54fb996a
ED
358 tmp = ufs_data_ptr_to_cpu(sb, p);
359
1da177e4 360 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
54fb996a
ED
361 ufs_warning(sb, "ufs_new_fragments", "internal warning"
362 " fragment %llu, count %u",
363 (unsigned long long)fragment, count);
1da177e4
LT
364 count = uspi->s_fpb - ufs_fragnum(fragment);
365 }
366 oldcount = ufs_fragnum (fragment);
367 newcount = oldcount + count;
368
369 /*
370 * Somebody else has just allocated our fragments
371 */
372 if (oldcount) {
373 if (!tmp) {
54fb996a
ED
374 ufs_error(sb, "ufs_new_fragments", "internal error, "
375 "fragment %llu, tmp %llu\n",
376 (unsigned long long)fragment,
377 (unsigned long long)tmp);
cdd9eefd 378 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 379 return INVBLOCK;
1da177e4
LT
380 }
381 if (fragment < UFS_I(inode)->i_lastfrag) {
abf5d15f 382 UFSD("EXIT (ALREADY ALLOCATED)\n");
cdd9eefd 383 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
384 return 0;
385 }
386 }
387 else {
388 if (tmp) {
abf5d15f 389 UFSD("EXIT (ALREADY ALLOCATED)\n");
cdd9eefd 390 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
391 return 0;
392 }
393 }
394
395 /*
396 * There is not enough space for user on the device
397 */
c596961d 398 if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
b451cec4
AV
399 if (!capable(CAP_SYS_RESOURCE)) {
400 mutex_unlock(&UFS_SB(sb)->s_lock);
401 UFSD("EXIT (FAILED)\n");
402 return 0;
403 }
1da177e4
LT
404 }
405
406 if (goal >= uspi->s_size)
407 goal = 0;
408 if (goal == 0)
409 cgno = ufs_inotocg (inode->i_ino);
410 else
54fb996a 411 cgno = ufs_dtog(uspi, goal);
1da177e4
LT
412
413 /*
414 * allocate new fragment
415 */
416 if (oldcount == 0) {
417 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
418 if (result) {
bd2843fe
AV
419 ufs_clear_frags(inode, result + oldcount,
420 newcount - oldcount, locked_page != NULL);
09bf4f5b 421 *err = 0;
724bb09f 422 write_seqlock(&UFS_I(inode)->meta_lock);
54fb996a 423 ufs_cpu_to_data_ptr(sb, p, result);
54fb996a 424 UFS_I(inode)->i_lastfrag =
1d582723 425 max(UFS_I(inode)->i_lastfrag, fragment + count);
09bf4f5b 426 write_sequnlock(&UFS_I(inode)->meta_lock);
1da177e4 427 }
cdd9eefd 428 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 429 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
430 return result;
431 }
432
433 /*
434 * resize block
435 */
45641c82 436 result = ufs_add_fragments(inode, tmp, oldcount, newcount);
1da177e4
LT
437 if (result) {
438 *err = 0;
09bf4f5b 439 read_seqlock_excl(&UFS_I(inode)->meta_lock);
1d582723
DC
440 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
441 fragment + count);
09bf4f5b 442 read_sequnlock_excl(&UFS_I(inode)->meta_lock);
d63b7090
ED
443 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
444 locked_page != NULL);
cdd9eefd 445 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 446 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
447 return result;
448 }
449
450 /*
451 * allocate new block and move data
452 */
77e9ce32 453 if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
1da177e4 454 request = newcount;
77e9ce32
AV
455 if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
456 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
457 } else {
1da177e4 458 request = uspi->s_fpb;
77e9ce32
AV
459 if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
460 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
1da177e4
LT
461 }
462 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
463 if (result) {
efee2b81
ED
464 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
465 locked_page != NULL);
289dec5b 466 mutex_unlock(&UFS_SB(sb)->s_lock);
4b25a37e
ED
467 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
468 uspi->s_sbbase + tmp,
469 uspi->s_sbbase + result, locked_page);
09bf4f5b 470 *err = 0;
724bb09f 471 write_seqlock(&UFS_I(inode)->meta_lock);
54fb996a 472 ufs_cpu_to_data_ptr(sb, p, result);
1d582723
DC
473 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
474 fragment + count);
09bf4f5b 475 write_sequnlock(&UFS_I(inode)->meta_lock);
1da177e4
LT
476 if (newcount < request)
477 ufs_free_fragments (inode, result + newcount, request - newcount);
478 ufs_free_fragments (inode, tmp, oldcount);
54fb996a 479 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
480 return result;
481 }
482
cdd9eefd 483 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 484 UFSD("EXIT (FAILED)\n");
1da177e4
LT
485 return 0;
486}
487
eb315d2a
AV
488static bool try_add_frags(struct inode *inode, unsigned frags)
489{
490 unsigned size = frags * i_blocksize(inode);
491 spin_lock(&inode->i_lock);
492 __inode_add_bytes(inode, size);
493 if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
494 __inode_sub_bytes(inode, size);
495 spin_unlock(&inode->i_lock);
496 return false;
497 }
498 spin_unlock(&inode->i_lock);
499 return true;
500}
501
54fb996a 502static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
45641c82 503 unsigned oldcount, unsigned newcount)
1da177e4
LT
504{
505 struct super_block * sb;
506 struct ufs_sb_private_info * uspi;
1da177e4
LT
507 struct ufs_cg_private_info * ucpi;
508 struct ufs_cylinder_group * ucg;
509 unsigned cgno, fragno, fragoff, count, fragsize, i;
510
54fb996a
ED
511 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
512 (unsigned long long)fragment, oldcount, newcount);
1da177e4
LT
513
514 sb = inode->i_sb;
515 uspi = UFS_SB(sb)->s_uspi;
1da177e4
LT
516 count = newcount - oldcount;
517
54fb996a 518 cgno = ufs_dtog(uspi, fragment);
1da177e4
LT
519 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
520 return 0;
521 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
522 return 0;
523 ucpi = ufs_load_cylinder (sb, cgno);
524 if (!ucpi)
525 return 0;
9695ef16 526 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
527 if (!ufs_cg_chkmagic(sb, ucg)) {
528 ufs_panic (sb, "ufs_add_fragments",
529 "internal error, bad magic number on cg %u", cgno);
530 return 0;
531 }
532
54fb996a 533 fragno = ufs_dtogd(uspi, fragment);
1da177e4
LT
534 fragoff = ufs_fragnum (fragno);
535 for (i = oldcount; i < newcount; i++)
9695ef16 536 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
1da177e4 537 return 0;
eb315d2a
AV
538
539 if (!try_add_frags(inode, count))
540 return 0;
1da177e4
LT
541 /*
542 * Block can be extended
543 */
a3fda0ff 544 ucg->cg_time = ufs_get_seconds(sb);
1da177e4 545 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
9695ef16 546 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
1da177e4
LT
547 break;
548 fragsize = i - oldcount;
549 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
550 ufs_panic (sb, "ufs_add_fragments",
551 "internal error or corrupted bitmap on cg %u", cgno);
552 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
553 if (fragsize != count)
554 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
555 for (i = oldcount; i < newcount; i++)
9695ef16 556 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
1da177e4
LT
557
558 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
559 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
ee3ffd6c 560 uspi->cs_total.cs_nffree -= count;
1da177e4 561
9695ef16
ED
562 ubh_mark_buffer_dirty (USPI_UBH(uspi));
563 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
1751e8a6 564 if (sb->s_flags & SB_SYNCHRONOUS)
9cb569d6 565 ubh_sync_block(UCPI_UBH(ucpi));
9e9ad5f4 566 ufs_mark_sb_dirty(sb);
1da177e4 567
54fb996a 568 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
1da177e4
LT
569
570 return fragment;
571}
572
/*
 * Test whether cylinder group 'cgno' can satisfy the allocation: it has
 * either a whole free block or a free fragment run of at least 'count'.
 * Jumps to cg_found on success (used only inside ufs_alloc_fragments).
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
54fb996a
ED
581static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
582 u64 goal, unsigned count, int *err)
1da177e4
LT
583{
584 struct super_block * sb;
585 struct ufs_sb_private_info * uspi;
1da177e4
LT
586 struct ufs_cg_private_info * ucpi;
587 struct ufs_cylinder_group * ucg;
54fb996a
ED
588 unsigned oldcg, i, j, k, allocsize;
589 u64 result;
1da177e4 590
54fb996a
ED
591 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
592 inode->i_ino, cgno, (unsigned long long)goal, count);
1da177e4
LT
593
594 sb = inode->i_sb;
595 uspi = UFS_SB(sb)->s_uspi;
1da177e4
LT
596 oldcg = cgno;
597
598 /*
599 * 1. searching on preferred cylinder group
600 */
601 UFS_TEST_FREE_SPACE_CG
602
603 /*
604 * 2. quadratic rehash
605 */
606 for (j = 1; j < uspi->s_ncg; j *= 2) {
607 cgno += j;
608 if (cgno >= uspi->s_ncg)
609 cgno -= uspi->s_ncg;
610 UFS_TEST_FREE_SPACE_CG
611 }
612
613 /*
614 * 3. brute force search
615 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
616 */
617 cgno = (oldcg + 1) % uspi->s_ncg;
618 for (j = 2; j < uspi->s_ncg; j++) {
619 cgno++;
620 if (cgno >= uspi->s_ncg)
621 cgno = 0;
622 UFS_TEST_FREE_SPACE_CG
623 }
624
abf5d15f 625 UFSD("EXIT (FAILED)\n");
1da177e4
LT
626 return 0;
627
628cg_found:
629 ucpi = ufs_load_cylinder (sb, cgno);
630 if (!ucpi)
631 return 0;
9695ef16 632 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
633 if (!ufs_cg_chkmagic(sb, ucg))
634 ufs_panic (sb, "ufs_alloc_fragments",
635 "internal error, bad magic number on cg %u", cgno);
a3fda0ff 636 ucg->cg_time = ufs_get_seconds(sb);
1da177e4
LT
637
638 if (count == uspi->s_fpb) {
639 result = ufs_alloccg_block (inode, ucpi, goal, err);
54fb996a 640 if (result == INVBLOCK)
1da177e4
LT
641 return 0;
642 goto succed;
643 }
644
645 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
646 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
647 break;
648
649 if (allocsize == uspi->s_fpb) {
650 result = ufs_alloccg_block (inode, ucpi, goal, err);
54fb996a 651 if (result == INVBLOCK)
1da177e4 652 return 0;
54fb996a 653 goal = ufs_dtogd(uspi, result);
1da177e4 654 for (i = count; i < uspi->s_fpb; i++)
9695ef16 655 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
1da177e4 656 i = uspi->s_fpb - count;
1da177e4 657
eb315d2a 658 inode_sub_bytes(inode, i << uspi->s_fshift);
1da177e4 659 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
ee3ffd6c 660 uspi->cs_total.cs_nffree += i;
1da177e4
LT
661 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
662 fs32_add(sb, &ucg->cg_frsum[i], 1);
663 goto succed;
664 }
665
666 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
54fb996a 667 if (result == INVBLOCK)
1da177e4 668 return 0;
eb315d2a
AV
669 if (!try_add_frags(inode, count))
670 return 0;
1da177e4 671 for (i = 0; i < count; i++)
9695ef16 672 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
1da177e4
LT
673
674 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
ee3ffd6c 675 uspi->cs_total.cs_nffree -= count;
1da177e4
LT
676 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
677 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
678
679 if (count != allocsize)
680 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
681
682succed:
9695ef16
ED
683 ubh_mark_buffer_dirty (USPI_UBH(uspi));
684 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
1751e8a6 685 if (sb->s_flags & SB_SYNCHRONOUS)
9cb569d6 686 ubh_sync_block(UCPI_UBH(ucpi));
9e9ad5f4 687 ufs_mark_sb_dirty(sb);
1da177e4
LT
688
689 result += cgno * uspi->s_fpg;
54fb996a 690 UFSD("EXIT3, result %llu\n", (unsigned long long)result);
1da177e4
LT
691 return result;
692}
693
54fb996a
ED
694static u64 ufs_alloccg_block(struct inode *inode,
695 struct ufs_cg_private_info *ucpi,
696 u64 goal, int *err)
1da177e4
LT
697{
698 struct super_block * sb;
699 struct ufs_sb_private_info * uspi;
1da177e4 700 struct ufs_cylinder_group * ucg;
54fb996a 701 u64 result, blkno;
1da177e4 702
54fb996a 703 UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
1da177e4
LT
704
705 sb = inode->i_sb;
706 uspi = UFS_SB(sb)->s_uspi;
9695ef16 707 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
1da177e4
LT
708
709 if (goal == 0) {
710 goal = ucpi->c_rotor;
711 goto norot;
712 }
713 goal = ufs_blknum (goal);
54fb996a 714 goal = ufs_dtogd(uspi, goal);
1da177e4
LT
715
716 /*
717 * If the requested block is available, use it.
718 */
9695ef16 719 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
1da177e4
LT
720 result = goal;
721 goto gotit;
722 }
723
724norot:
725 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
54fb996a
ED
726 if (result == INVBLOCK)
727 return INVBLOCK;
1da177e4
LT
728 ucpi->c_rotor = result;
729gotit:
eb315d2a
AV
730 if (!try_add_frags(inode, uspi->s_fpb))
731 return 0;
1da177e4 732 blkno = ufs_fragstoblks(result);
9695ef16 733 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
1da177e4
LT
734 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
735 ufs_clusteracct (sb, ucpi, blkno, -1);
1da177e4
LT
736
737 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
ee3ffd6c 738 uspi->cs_total.cs_nbfree--;
1da177e4 739 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
54fb996a
ED
740
741 if (uspi->fs_magic != UFS2_MAGIC) {
742 unsigned cylno = ufs_cbtocylno((unsigned)result);
743
744 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
745 ufs_cbtorpos((unsigned)result)), 1);
746 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
747 }
1da177e4 748
54fb996a 749 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
750
751 return result;
752}
753
3e41f597
ED
754static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
755 struct ufs_buffer_head *ubh,
756 unsigned begin, unsigned size,
757 unsigned char *table, unsigned char mask)
1da177e4 758{
3e41f597
ED
759 unsigned rest, offset;
760 unsigned char *cp;
1da177e4 761
1da177e4 762
3e41f597
ED
763 offset = begin & ~uspi->s_fmask;
764 begin >>= uspi->s_fshift;
765 for (;;) {
766 if ((offset + size) < uspi->s_fsize)
767 rest = size;
768 else
769 rest = uspi->s_fsize - offset;
770 size -= rest;
771 cp = ubh->bh[begin]->b_data + offset;
772 while ((table[*cp++] & mask) == 0 && --rest)
773 ;
774 if (rest || !size)
775 break;
776 begin++;
777 offset = 0;
778 }
779 return (size + rest);
780}
781
782/*
783 * Find a block of the specified size in the specified cylinder group.
784 * @sp: pointer to super block
785 * @ucpi: pointer to cylinder group info
786 * @goal: near which block we want find new one
787 * @count: specified size
788 */
54fb996a
ED
789static u64 ufs_bitmap_search(struct super_block *sb,
790 struct ufs_cg_private_info *ucpi,
791 u64 goal, unsigned count)
3e41f597
ED
792{
793 /*
794 * Bit patterns for identifying fragments in the block map
795 * used as ((map & mask_arr) == want_arr)
796 */
797 static const int mask_arr[9] = {
798 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
799 };
800 static const int want_arr[9] = {
801 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
802 };
803 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
54fb996a 804 unsigned start, length, loc;
3e41f597 805 unsigned pos, want, blockmap, mask, end;
54fb996a 806 u64 result;
3e41f597 807
54fb996a
ED
808 UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
809 (unsigned long long)goal, count);
3e41f597 810
1da177e4 811 if (goal)
54fb996a 812 start = ufs_dtogd(uspi, goal) >> 3;
1da177e4
LT
813 else
814 start = ucpi->c_frotor >> 3;
815
816 length = ((uspi->s_fpg + 7) >> 3) - start;
3e41f597 817 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
1da177e4
LT
818 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
819 1 << (count - 1 + (uspi->s_fpb & 7)));
3e41f597 820 if (loc == 0) {
1da177e4 821 length = start + 1;
3e41f597
ED
822 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
823 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
824 ufs_fragtable_other,
825 1 << (count - 1 + (uspi->s_fpb & 7)));
826 if (loc == 0) {
827 ufs_error(sb, "ufs_bitmap_search",
828 "bitmap corrupted on cg %u, start %u,"
829 " length %u, count %u, freeoff %u\n",
830 ucpi->c_cgx, start, length, count,
831 ucpi->c_freeoff);
54fb996a 832 return INVBLOCK;
1da177e4
LT
833 }
834 start = 0;
835 }
3e41f597 836 result = (start + length - loc) << 3;
1da177e4
LT
837 ucpi->c_frotor = result;
838
839 /*
840 * found the byte in the map
841 */
3e41f597
ED
842
843 for (end = result + 8; result < end; result += uspi->s_fpb) {
844 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
845 blockmap <<= 1;
846 mask = mask_arr[count];
847 want = want_arr[count];
848 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
849 if ((blockmap & mask) == want) {
54fb996a
ED
850 UFSD("EXIT, result %llu\n",
851 (unsigned long long)result);
3e41f597
ED
852 return result + pos;
853 }
854 mask <<= 1;
855 want <<= 1;
856 }
857 }
858
859 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
860 ucpi->c_cgx);
abf5d15f 861 UFSD("EXIT (FAILED)\n");
54fb996a 862 return INVBLOCK;
1da177e4
LT
863}
864
865static void ufs_clusteracct(struct super_block * sb,
866 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
867{
868 struct ufs_sb_private_info * uspi;
869 int i, start, end, forw, back;
870
871 uspi = UFS_SB(sb)->s_uspi;
872 if (uspi->s_contigsumsize <= 0)
873 return;
874
875 if (cnt > 0)
9695ef16 876 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
1da177e4 877 else
9695ef16 878 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
1da177e4
LT
879
880 /*
881 * Find the size of the cluster going forward.
882 */
883 start = blkno + 1;
884 end = start + uspi->s_contigsumsize;
885 if ( end >= ucpi->c_nclusterblks)
886 end = ucpi->c_nclusterblks;
9695ef16 887 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
1da177e4
LT
888 if (i > end)
889 i = end;
890 forw = i - start;
891
892 /*
893 * Find the size of the cluster going backward.
894 */
895 start = blkno - 1;
896 end = start - uspi->s_contigsumsize;
897 if (end < 0 )
898 end = -1;
9695ef16 899 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
1da177e4
LT
900 if ( i < end)
901 i = end;
902 back = start - i;
903
904 /*
905 * Account for old cluster and the possibly new forward and
906 * back clusters.
907 */
908 i = back + forw + 1;
909 if (i > uspi->s_contigsumsize)
910 i = uspi->s_contigsumsize;
9695ef16 911 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
1da177e4 912 if (back > 0)
9695ef16 913 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
1da177e4 914 if (forw > 0)
9695ef16 915 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
1da177e4
LT
916}
917
918
/*
 * Fragment-run lookup table for filesystems with 8 fragments per block:
 * entry [map] has bit (n-1) set when the 8-bit free map 'map' contains
 * a run of exactly n free fragments (used by ubh_scanc via
 * ufs_bitmap_search).
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
937
/*
 * Fragment-run lookup table for filesystems with other than 8 fragments
 * per block; the bit offset used by the scan additionally depends on
 * (s_fpb & 7) — see ufs_bitmap_search().
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};