// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer ([email protected])
 * (C) 2003 Ardis Technologies <[email protected]>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The initial clump size calculation code was taken
 * from http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attributes form a geometric
	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
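
/*
 * For illustration, checking the series above: in the Catalog column,
 * 32 * 8^(1/5) ~= 32 * 1.516 ~= 49, then 49 * 1.516 ~= 74, and so on up
 * to 256 at 512GB; the Attributes column grows by 4^(1/5) ~= 1.320 per
 * doubling of volume size (64 -> 84 -> 111 -> 147 -> 194 -> 256).
 */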

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
				  u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;

	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}

	/*
	 * The default clump size is 0.8% of the volume size, and it must
	 * also be a multiple of the node and block size.
	 */
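	/*
	 * Worked example (assuming @sectors counts 512-byte units, which
	 * the 0x200000 == 1GB threshold below suggests): sectors << 2 is
	 * sectors * 4 bytes, i.e. 1/128 of the volume, roughly 0.78% --
	 * the "0.8%" referred to above.
	 */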
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/*  0.8 %  */
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
	}

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}
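
/*
 * Illustration (assuming 512-byte sectors): a 16GB volume has 2^25 sectors,
 * so sectors >> 22 == 8 and the loop above stops with i == 4, selecting the
 * "16GB" row of clumptbl -- a 64MB attributes clump, a 32MB catalog clump,
 * or a 5MB extents clump, before the rounding to a multiple of the node and
 * block size.
 */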

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;

	kunmap(page);
	put_page(page);
	return tree;

 fail_page:
	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}
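
/*
 * A rough usage sketch (the real call sites live in the mount path and may
 * differ in detail): superblock setup opens the special trees along these
 * lines and tears them down again with hfs_btree_close() on unmount or on
 * a failed mount:
 *
 *	HFSPLUS_SB(sb)->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 *	HFSPLUS_SB(sb)->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
 */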

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d "
						"still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

/* Make sure @tree has enough space for @rsvd_nodes new nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int res;

	if (rsvd_nodes <= 0)
		return 0;

	while (tree->free_nodes < rsvd_nodes) {
		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return res;
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}
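
/*
 * A hypothetical caller pattern (names and reserve count are illustrative,
 * not taken from this file): reserve the worst-case number of nodes before
 * a B-tree insertion so that allocation cannot fail halfway through a node
 * split, e.g.:
 *
 *	res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
 *	if (res)
 *		return res;
 *	res = hfs_brec_insert(fd, &entry, entry_len);
 */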

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}
447 | ||
448 | void hfs_bmap_free(struct hfs_bnode *node) | |
449 | { | |
450 | struct hfs_btree *tree; | |
451 | struct page *page; | |
452 | u16 off, len; | |
453 | u32 nidx; | |
454 | u8 *data, byte, m; | |
455 | ||
c2b3e1f7 | 456 | hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this); |
0bf3ba53 | 457 | BUG_ON(!node->this); |
1da177e4 LT |
458 | tree = node->tree; |
459 | nidx = node->this; | |
460 | node = hfs_bnode_find(tree, 0); | |
461 | if (IS_ERR(node)) | |
462 | return; | |
463 | len = hfs_brec_lenoff(node, 2, &off); | |
464 | while (nidx >= len * 8) { | |
465 | u32 i; | |
466 | ||
467 | nidx -= len * 8; | |
468 | i = node->next; | |
469 | hfs_bnode_put(node); | |
470 | if (!i) { | |
471 | /* panic */; | |
d6142673 | 472 | pr_crit("unable to free bnode %u. " |
2753cc28 AS |
473 | "bmap not found!\n", |
474 | node->this); | |
1da177e4 LT |
475 | return; |
476 | } | |
477 | node = hfs_bnode_find(tree, i); | |
478 | if (IS_ERR(node)) | |
479 | return; | |
480 | if (node->type != HFS_NODE_MAP) { | |
481 | /* panic */; | |
d6142673 | 482 | pr_crit("invalid bmap found! " |
2753cc28 AS |
483 | "(%u,%d)\n", |
484 | node->this, node->type); | |
1da177e4 LT |
485 | hfs_bnode_put(node); |
486 | return; | |
487 | } | |
488 | len = hfs_brec_lenoff(node, 0, &off); | |
489 | } | |
490 | off += node->page_offset + nidx / 8; | |
09cbfeaf | 491 | page = node->page[off >> PAGE_SHIFT]; |
1da177e4 | 492 | data = kmap(page); |
09cbfeaf | 493 | off &= ~PAGE_MASK; |
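	/*
	 * The allocation bitmap is MSB-first: bit 7 of each byte maps the
	 * lowest-numbered node covered by that byte (matching the 0x80-first
	 * scan in hfs_bmap_alloc() above), hence the (~nidx & 7) shift.
	 */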
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode "
				"%u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}