author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d  /fs/hfs/bnode.c
tags      Linux-2.6.12-rc2, v2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'fs/hfs/bnode.c')
 -rw-r--r--  fs/hfs/bnode.c | 498 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 498 insertions(+), 0 deletions(-)

diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
new file mode 100644
index 000000000000..6ad1211f84ed
--- /dev/null
+++ b/fs/hfs/bnode.c
@@ -0,0 +1,498 @@
/*
 * linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/swap.h>

#include "btree.h"

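/*
 * Compile-time toggle: when non-zero, each bnode holds an extra
 * reference on its backing pages for its whole lifetime (see
 * hfs_bnode_get/put and __hfs_bnode_create). Disabled here.
 */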
#define REF_PAGES	0

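/*
 * Copy 'len' bytes at node offset 'off' into 'buf'. Node data lives
 * in the page cache, so map the page, copy, and unmap again. (This
 * early version assumes the range stays within the first page.)
 */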
void hfs_bnode_read(struct hfs_bnode *node, void *buf,
		int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}

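/*
 * Fixed-width helpers on top of hfs_bnode_read(): on-disk B-tree
 * fields are big-endian, so convert to CPU order on the way out.
 */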
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

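/*
 * Read a full key at offset 'off'. Leaf keys (and index keys when the
 * tree uses variable-width index keys) start with a length byte, so
 * the copy is len+1 bytes; otherwise keys are padded to the fixed
 * max_key_len. Layout of a variable-width key:
 *
 *   [len][key bytes ...]   total bytes copied = len + 1
 */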
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}

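/*
 * Write helpers mirror the read side: map the page, copy in, unmap,
 * and mark the page dirty so it gets written back.
 */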
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}

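/* Zero 'len' bytes of node data starting at offset 'off'. */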
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}

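/*
 * Copy 'len' bytes from src_node at 'src' to dst_node at 'dst'.
 * Uses memcpy, so overlapping ranges within one node should go
 * through hfs_bnode_move() instead.
 */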
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page *src_page, *dst_page;

	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}

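/*
 * Shift 'len' bytes within a single node from 'src' to 'dst'.
 * memmove() handles the overlapping case that comes up when records
 * are inserted into or deleted from the middle of a node.
 */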
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}

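/*
 * Debug dump of a node: descriptor fields, then the record offset
 * table. Offsets are stored as u16s packed at the *end* of the node
 * growing backwards, so record i's offset lives at
 * node_size - 2 * (i + 1).
 */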
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			dprint(DBG_BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d)", tmp);
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}

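/*
 * Remove a node from the doubly linked sibling chain at its tree
 * level, fixing up the on-disk prev/next CNIDs of its neighbours
 * (and the tree's leaf_head/leaf_tail for leaf nodes), then mark it
 * deleted. The actual bitmap/memory release happens at the final
 * hfs_bnode_put().
 */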
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk("hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

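/*
 * Fold a 32-bit node number down to a hash bucket index by adding in
 * its higher bytes, so consecutive CNIDs spread across buckets.
 * NODE_HASH_SIZE must be a power of two for the final mask to work.
 */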
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

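/*
 * Look up a node in the in-memory hash without taking a reference.
 * Caller must hold tree->hash_lock.
 */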
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		printk("HFS: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid)
			return node;
	}
	return NULL;
}

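/*
 * Allocate an in-memory bnode for 'cnid', insert it into the hash,
 * and read its backing pages from the tree inode's page cache. If
 * another thread won the race to insert the same node, free ours and
 * wait for theirs to finish initializing. A NULL return means no
 * memory; I/O failures are reported via HFS_BNODE_ERROR in ->flags.
 */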
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk("HFS: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kmalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	memset(node, 0, size);
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
#if !REF_PAGES
		page_cache_release(page);
#endif
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

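/*
 * Drop a node from the hash chain, normally under tree->hash_lock
 * (hfs_bnode_put() takes it via atomic_dec_and_lock()).
 */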
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	if (!*p)
		BUG();
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

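	/*
	 * Sanity-check the record offset table: offsets must be
	 * strictly increasing, 2-byte aligned, and inside the node;
	 * for index and leaf nodes each key must also fit within its
	 * record.
	 */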
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

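/*
 * Free the in-memory node. The commented-out page releases belong to
 * the REF_PAGES scheme; with REF_PAGES disabled the pages were never
 * pinned, so only the structure itself is freed.
 */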
void hfs_bnode_free(struct hfs_bnode *node)
{
	//int i;

	//for (i = 0; i < node->tree->pages_per_bnode; i++)
	//	if (node->page[i])
	//		page_cache_release(node->page[i]);
	kfree(node);
}

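/*
 * Create a brand-new node: it must not already be in memory (that
 * would indicate an allocation bug, hence the BUG()), and its
 * contents are zeroed page by page before use.
 */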
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node)
		BUG();
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

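/* Take an extra reference on a node (NULL-safe). */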
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
#if REF_PAGES
		{
			int i;
			for (i = 0; i < node->tree->pages_per_bnode; i++)
				get_page(node->page[i]);
		}
#endif
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		if (!atomic_read(&node->refcnt))
			BUG();
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
#if REF_PAGES
			for (i = 0; i < tree->pages_per_bnode; i++)
				put_page(node->page[i]);
#endif
			return;
		}
		for (i = 0; i < tree->pages_per_bnode; i++) {
			mark_page_accessed(node->page[i]);
#if REF_PAGES
			put_page(node->page[i]);
#endif
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}