author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/hfsplus/bnode.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'fs/hfsplus/bnode.c')
-rw-r--r--	fs/hfsplus/bnode.c	662
1 file changed, 662 insertions, 0 deletions
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
new file mode 100644
index 000000000000..267872e84d71
--- /dev/null
+++ b/fs/hfsplus/bnode.c
@@ -0,0 +1,662 @@
/*
 * linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/version.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define REF_PAGES	0

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min(len, (int)PAGE_CACHE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page **src_page, **dst_page;
	int l;

	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
	src &= ~PAGE_CACHE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
	dst &= ~PAGE_CACHE_MASK;

	if (src == dst) {
		l = min(len, (int)PAGE_CACHE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min(len, (int)PAGE_CACHE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
				l = PAGE_CACHE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_CACHE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src = (src & ~PAGE_CACHE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst = (dst & ~PAGE_CACHE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_CACHE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_CACHE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_CACHE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_CACHE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src &= ~PAGE_CACHE_MASK;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst &= ~PAGE_CACHE_MASK;

		if (src == dst) {
			l = min(len, (int)PAGE_CACHE_SIZE - src);
			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min(len, (int)PAGE_CACHE_SIZE);
				memmove(kmap(*++dst_page), kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
					l = PAGE_CACHE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_CACHE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			dprint(DBG_BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d)", tmp);
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}

void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk("hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}

static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kmalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	memset(node, 0, size);
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
#if !REF_PAGES
		page_cache_release(page);
#endif
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
	       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	if (!*p)
		BUG();
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

void hfs_bnode_free(struct hfs_bnode *node)
{
	//int i;

	//for (i = 0; i < node->tree->pages_per_bnode; i++)
	//	if (node->page[i])
	//		page_cache_release(node->page[i]);
	kfree(node);
}

struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		printk("new node %u already hashed?\n", num);
		BUG();
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
#if REF_PAGES
		{
		int i;
		for (i = 0; i < node->tree->pages_per_bnode; i++)
			get_page(node->page[i]);
		}
#endif
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		if (!atomic_read(&node->refcnt))
			BUG();
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
#if REF_PAGES
			for (i = 0; i < tree->pages_per_bnode; i++)
				put_page(node->page[i]);
#endif
			return;
		}
		for (i = 0; i < tree->pages_per_bnode; i++) {
			mark_page_accessed(node->page[i]);
#if REF_PAGES
			put_page(node->page[i]);
#endif
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}