/*
 * linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer ([email protected])
 * (C) 2003 Ardis Technologies <[email protected]>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/swap.h>

#include "btree.h"
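
/*
 * Copy len bytes at offset off within the node into buf.  Node data
 * is accessed through the node's first backing page; an HFS node is
 * assumed not to cross a page boundary.
 */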
void hfs_bnode_read(struct hfs_bnode *node, void *buf,
		int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}
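
/* Read a big-endian 16-bit value from the node, returning it in CPU order */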
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}
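
/* Read a single byte from the node */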
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}
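
/*
 * Copy the key record at offset off into key.  Leaf keys and
 * variable-length index keys carry their length in the first byte;
 * fixed-length index keys use the tree's max_key_len.
 */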
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}
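
/* Copy len bytes from buf into the node at offset off and mark the page dirty */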
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}
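
/* Write a 16-bit value into the node in big-endian order */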
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}
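
/* Write a single byte into the node */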
void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}
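
/* Zero len bytes of the node starting at offset off */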
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}
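
/* Copy len bytes from offset src in src_node to offset dst in dst_node */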
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page *src_page, *dst_page;

	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}
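
/* Move len bytes within a node; memmove allows the ranges to overlap */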
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}
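
/* Dump the node descriptor and record offset table to the debug log */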
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			dprint(DBG_BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d)", tmp);
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}
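
/*
 * Unlink a node from its siblings, rewriting the on-disk prev/next
 * links and updating the tree's leaf_head/leaf_tail when a leaf is
 * removed from either end of the chain.
 */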
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}
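
/* Fold a 32-bit node number into an index into the node hash table */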
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}
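
/* Look up a node in the hash table; the caller holds tree->hash_lock */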
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}
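
/*
 * Allocate a node, add it to the hash table and read in its backing
 * pages.  If another thread created the same node first, wait until
 * that copy leaves the NEW state and return it instead.
 */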
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kmalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	memset(node, 0, size);
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
		page_cache_release(page);
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
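
/* Remove a node from the hash table; the caller holds tree->hash_lock */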
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
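
/* Free the in-memory node; the page references were already dropped at create time */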
void hfs_bnode_free(struct hfs_bnode *node)
{
	//int i;

	//for (i = 0; i < node->tree->pages_per_bnode; i++)
	//	if (node->page[i])
	//		page_cache_release(node->page[i]);
	kfree(node);
}
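
/* Create node num, which must not already be in memory, and zero its contents */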
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	BUG_ON(node);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
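
/* Take an extra reference on a node */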
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}