/*
 * linux/fs/hpfs/alloc.c
 *
 * Mikulas Patocka ([email protected]), 1998-1999
 *
 * HPFS bitmap operations
 */

#include "hpfs_fn.h"

static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);

/*
 * Check if a sector is allocated in bitmap
 * This is really slow. Turned on only if chk==2
 */

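/*
 * Note on the conventions used throughout this file: each allocation bitmap
 * covers 0x4000 sectors with one bit per sector, held in 512 32-bit words,
 * and a set bit means the sector is FREE (freeing sets bits, allocating
 * clears them).  For example, sector 0x12345 is tracked in bitmap
 * 0x12345 >> 14 == 4, word (0x12345 & 0x3fff) >> 5 == 0x11a,
 * bit 0x12345 & 0x1f == 5.
 */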
static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
{
        struct quad_buffer_head qbh;
        unsigned *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
        if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) {
                hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
                goto fail1;
        }
        hpfs_brelse4(&qbh);
        if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
                unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
                if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) {
                        hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
                        goto fail1;
                }
                hpfs_brelse4(&qbh);
        }
        return 0;
        fail1:
        hpfs_brelse4(&qbh);
        fail:
        return 1;
}

/*
 * Check that the sector(s) have a valid number and, if chk >= 2, that they
 * are also marked allocated in the bitmap.
 */

int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
{
        if (start + len < start || start < 0x12 ||
            start + len > hpfs_sb(s)->sb_fs_size) {
                hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start);
                return 1;
        }
        if (hpfs_sb(s)->sb_chk >= 2) {
                int i;
                for (i = 0; i < len; i++)
                        if (chk_if_allocated(s, start + i, msg)) return 1;
        }
        return 0;
}

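/*
 * Allocate a run of n sectors (n is 1 or 4) in the bitmap covering the
 * sector 'near', trying to keep 'forward' additional free sectors
 * immediately after the run for later preallocation.  A base of ~0x3fff
 * selects the directory-band dnode bitmap instead of a regular one.
 * tstbits() (from hpfs_fn.h) is expected to return 0 when the whole run
 * starting at the given bit is free, and otherwise a skip distance past
 * the first busy bit.
 */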
static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
{
        struct quad_buffer_head qbh;
        unsigned *bmp;
        unsigned bs = near & ~0x3fff;
        unsigned nr = (near & 0x3fff) & ~(n - 1);
        /*unsigned mnr;*/
        unsigned i, q;
        int a, b;
        secno ret = 0;
        if (n != 1 && n != 4) {
                hpfs_error(s, "Bad allocation size: %d", n);
                return 0;
        }
        lock_super(s);
        if (bs != ~0x3fff) {
                if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
        } else {
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls;
        }
        if (!tstbits(bmp, nr, n + forward)) {
                ret = bs + nr;
                goto rt;
        }
        /*if (!tstbits(bmp, nr + n, n + forward)) {
                ret = bs + nr + n;
                goto rt;
        }*/
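        /* No luck at the preferred position itself: scan onwards from just
           past it for a suitably aligned free run, wrapping once before
           falling back to the full word-by-word search below. */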
        q = nr + n; b = 0;
        while ((a = tstbits(bmp, q, n + forward)) != 0) {
                q += a;
                if (n != 1) q = ((q-1)&~(n-1))+n;
                if (!b) {
                        if (q>>5 != nr>>5) {
                                b = 1;
                                q = nr & 0x1f;
                        }
                } else if (q > nr) break;
        }
        if (!a) {
                ret = bs + q;
                goto rt;
        }
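        /* Nothing free near 'near': walk all 512 words of the bitmap,
           starting at the word that contains 'near' and wrapping around.
           Words that are 0 (fully allocated) are skipped, and for large
           requests only completely free words are considered.  q is backed
           up over trailing free bits of the previous word so that runs
           crossing a word boundary are still found. */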
        nr >>= 5;
        /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/
        i = nr;
        do {
                if (!bmp[i]) goto cont;
                if (n + forward >= 0x3f && bmp[i] != -1) goto cont;
                q = i<<5;
                if (i > 0) {
                        unsigned k = bmp[i-1];
                        while (k & 0x80000000) {
                                q--; k <<= 1;
                        }
                }
                if (n != 1) q = ((q-1)&~(n-1))+n;
                while ((a = tstbits(bmp, q, n + forward)) != 0) {
                        q += a;
                        if (n != 1) q = ((q-1)&~(n-1))+n;
                        if (q>>5 > i) break;
                }
                if (!a) {
                        ret = bs + q;
                        goto rt;
                }
                cont:
                i++, i &= 0x1ff;
        } while (i != nr);
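        /* On success, optionally double-check (when sb_chk is set) that the
           run really lies in this bitmap and is still free, then clear the
           bits, which marks the sectors as allocated. */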
        rt:
        if (ret) {
                if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
                        hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
                        ret = 0;
                        goto b;
                }
                bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f));
                hpfs_mark_4buffers_dirty(&qbh);
        }
        b:
        hpfs_brelse4(&qbh);
        uls:
        unlock_super(s);
        return ret;
}

/*
 * Allocation strategy: 1) search place near the sector specified
 *                      2) search bitmap where free sectors last found
 *                      3) search all bitmaps
 *                      4) search all bitmaps ignoring number of pre-allocated
 *                         sectors
 */

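/*
 * 'forward' is a preallocation hint: a positive value is best effort only
 * (it is capped by sb_max_fwd_alloc and halved on each failed pass), while
 * a negative value means the caller insists on that many extra sectors,
 * which are then grabbed one by one with hpfs_alloc_if_possible_nolock()
 * after the first sector has been found.
 */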
secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock)
{
        secno sec;
        int i;
        unsigned n_bmps;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        int f_p = 0;
        int near_bmp;
        if (forward < 0) {
                forward = -forward;
                f_p = 1;
        }
        if (lock) hpfs_lock_creation(s);
        n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
        if (near && near < sbi->sb_fs_size) {
                if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
                near_bmp = near >> 14;
        } else near_bmp = n_bmps / 2;
        /*
        if (b != -1) {
                if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
                        b &= 0x0fffffff;
                        goto ret;
                }
                if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
        */
        if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc;
        less_fwd:
        for (i = 0; i < n_bmps; i++) {
                if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) {
                        sbi->sb_c_bitmap = near_bmp+i;
                        goto ret;
                }
                if (!forward) {
                        if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) {
                                sbi->sb_c_bitmap = near_bmp-i-1;
                                goto ret;
                        }
                } else {
                        if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) {
                                sbi->sb_c_bitmap = near_bmp+i-n_bmps;
                                goto ret;
                        }
                }
                if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) {
                        goto ret;
                }
        }
        if (!f_p) {
                if (forward) {
                        sbi->sb_max_fwd_alloc = forward * 3 / 4;
                        forward /= 2;
                        goto less_fwd;
                }
        }
        sec = 0;
        ret:
        if (sec && f_p) {
                for (i = 0; i < forward; i++) {
                        if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) {
                                hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
                                sec = 0;
                                break;
                        }
                }
        }
        if (lock) hpfs_unlock_creation(s);
        return sec;
}

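/*
 * Allocate one dnode-sized slot (4 sectors) in the directory band.  The
 * band has its own bitmap with one bit per dnode, so 'near' is first
 * clamped into the band and converted to a dnode index, and the result is
 * converted back into a sector number.
 */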
static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
{
        unsigned nr = near;
        secno sec;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        if (nr < sbi->sb_dirband_start)
                nr = sbi->sb_dirband_start;
        if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size)
                nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
        nr -= sbi->sb_dirband_start;
        nr >>= 2;
        if (lock) hpfs_lock_creation(s);
        sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
        if (lock) hpfs_unlock_creation(s);
        if (!sec) return 0;
        return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}

/* Alloc sector if it's free */

static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec)
{
        struct quad_buffer_head qbh;
        unsigned *bmp;
        lock_super(s);
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
        if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) {
                bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f));
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                unlock_super(s);
                return 1;
        }
        hpfs_brelse4(&qbh);
        end:
        unlock_super(s);
        return 0;
}

int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
        int r;
        hpfs_lock_creation(s);
        r = hpfs_alloc_if_possible_nolock(s, sec);
        hpfs_unlock_creation(s);
        return r;
}

/* Free sectors in bitmaps */

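/*
 * Freeing walks the run sector by sector, setting the corresponding bits
 * (set bit == free) and moving on to the next bitmap whenever the sector
 * number crosses a 0x4000 boundary.  Freed space also raises
 * sb_max_fwd_alloc again, since forward preallocation is throttled while
 * the disk is getting full.
 */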
void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
{
        struct quad_buffer_head qbh;
        unsigned *bmp;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        /*printk("2 - ");*/
        if (!n) return;
        if (sec < 0x12) {
                hpfs_error(s, "Trying to free reserved sector %08x", sec);
                return;
        }
        lock_super(s);
        sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
        if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
        new_map:
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
                unlock_super(s);
                return;
        }
        new_tst:
        if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) {
                hpfs_error(s, "sector %08x not allocated", sec);
                hpfs_brelse4(&qbh);
                unlock_super(s);
                return;
        }
        bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f);
        if (!--n) {
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                unlock_super(s);
                return;
        }
        if (!(++sec & 0x3fff)) {
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                goto new_map;
        }
        goto new_tst;
}

/*
 * Check that there are at least n free dnodes on the filesystem.
 * Called before adding to a dnode; if we ran out of space while splitting
 * dnodes, the dnode tree would be corrupted.
 */

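/*
 * The dnode bitmap is counted first (every set bit there is one free
 * dnode).  After that the regular bitmaps are scanned, counting aligned
 * groups of four free sectors (nibbles of 0xf), since a dnode needs four
 * consecutive, 4-aligned sectors; the bitmap where allocations last
 * succeeded (sb_c_bitmap) is checked first.  Returns 0 when at least n
 * free dnodes were found and 1 otherwise.
 */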
int hpfs_check_free_dnodes(struct super_block *s, int n)
{
        int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
        int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
        int i, j;
        unsigned *bmp;
        struct quad_buffer_head qbh;
        if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                for (j = 0; j < 512; j++) {
                        unsigned k;
                        if (!bmp[j]) continue;
                        for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) {
                                hpfs_brelse4(&qbh);
                                return 0;
                        }
                }
        }
        hpfs_brelse4(&qbh);
        i = 0;
        if (hpfs_sb(s)->sb_c_bitmap != -1) {
                bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1");
                goto chk_bmp;
        }
        chk_next:
        if (i == b) i++;
        if (i >= n_bmps) return 1;
        bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2");
        chk_bmp:
        if (bmp) {
                for (j = 0; j < 512; j++) {
                        unsigned k;
                        if (!bmp[j]) continue;
                        for (k = 0xf; k; k <<= 4)
                                if ((bmp[j] & k) == k) {
                                        if (!--n) {
                                                hpfs_brelse4(&qbh);
                                                return 0;
                                        }
                                }
                }
                hpfs_brelse4(&qbh);
        }
        i++;
        goto chk_next;
}

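/*
 * Dnodes inside the directory band are freed in the band's own bitmap
 * (one bit per dnode); anything outside it is freed as four ordinary
 * sectors.
 */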
void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
{
        if (hpfs_sb(s)->sb_chk) if (dno & 3) {
                hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno);
                return;
        }
        if (dno < hpfs_sb(s)->sb_dirband_start ||
            dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
                hpfs_free_sectors(s, dno, 4);
        } else {
                struct quad_buffer_head qbh;
                unsigned *bmp;
                unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
                lock_super(s);
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                        unlock_super(s);
                        return;
                }
                bmp[ssec >> 5] |= 1 << (ssec & 0x1f);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                unlock_super(s);
        }
}

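/*
 * Allocate and map a fresh dnode.  While the directory band still has more
 * than FREE_DNODES_ADD free dnodes it is tried first, falling back to the
 * regular bitmaps; once it runs low the order is reversed.  The new dnode
 * is zeroed and seeded with what amounts to a single terminating dirent
 * (hence first_free == 52: a 20-byte dnode header plus one 32-byte dirent).
 */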
struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
                         dnode_secno *dno, struct quad_buffer_head *qbh,
                         int lock)
{
        struct dnode *d;
        if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
                if (!(*dno = alloc_in_dirband(s, near, lock)))
                        if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL;
        } else {
                if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock)))
                        if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL;
        }
        if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
                hpfs_free_dnode(s, *dno);
                return NULL;
        }
        memset(d, 0, 2048);
        d->magic = DNODE_MAGIC;
        d->first_free = 52;
        d->dirent[0] = 32;
        d->dirent[2] = 8;
        d->dirent[30] = 1;
        d->dirent[31] = 255;
        d->self = *dno;
        return d;
}

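/*
 * The fnode and anode allocators below each grab one sector near 'near'
 * with a forward-preallocation hint (FNODE_ALLOC_FWD / ANODE_ALLOC_FWD),
 * zero it, and fill in the magic number and the initial btree header.
 */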
struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno,
                         struct buffer_head **bh)
{
        struct fnode *f;
        if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL;
        if (!(f = hpfs_get_sector(s, *fno, bh))) {
                hpfs_free_sectors(s, *fno, 1);
                return NULL;
        }
        memset(f, 0, 512);
        f->magic = FNODE_MAGIC;
        f->ea_offs = 0xc4;
        f->btree.n_free_nodes = 8;
        f->btree.first_free = 8;
        return f;
}

struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano,
                         struct buffer_head **bh)
{
        struct anode *a;
        if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL;
        if (!(a = hpfs_get_sector(s, *ano, bh))) {
                hpfs_free_sectors(s, *ano, 1);
                return NULL;
        }
        memset(a, 0, 512);
        a->magic = ANODE_MAGIC;
        a->self = *ano;
        a->btree.n_free_nodes = 40;
        a->btree.n_used_nodes = 0;
        a->btree.first_free = 8;
        return a;
}