Diffstat (limited to 'fs/hfsplus/bnode.c')
 fs/hfsplus/bnode.c | 41 ++++++++++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index c8aa1659b838..cf7dc8dac766 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -212,7 +212,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, len);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
@@ -250,14 +251,16 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
 		if (src == dst) {
 			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
 				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				memmove(kmap(*++dst_page),
+					kmap(*++src_page), l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
@@ -268,7 +271,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				if (PAGE_CACHE_SIZE - src <
+						PAGE_CACHE_SIZE - dst) {
 					l = PAGE_CACHE_SIZE - src;
 					src = 0;
 					dst += l;
@@ -340,7 +344,8 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->next = node->next;
 		cnid = cpu_to_be32(tmp->next);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, next), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_head = node->next;
@@ -351,7 +356,8 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->prev = node->prev;
 		cnid = cpu_to_be32(tmp->prev);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, prev), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_tail = node->prev;
@@ -379,7 +385,9 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -402,7 +410,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -429,7 +439,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	} else {
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
-		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		wait_event(node2->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node2->flags));
 		return node2;
 	}
 	spin_unlock(&tree->hash_lock);
@@ -483,7 +494,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (node) {
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
-		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		wait_event(node->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node->flags));
 		if (test_bit(HFS_BNODE_ERROR, &node->flags))
 			goto node_error;
 		return node;
@@ -497,7 +509,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
@@ -607,7 +620,8 @@ void hfs_bnode_get(struct hfs_bnode *node)
 	if (node) {
 		atomic_inc(&node->refcnt);
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 	}
 }
 
@@ -619,7 +633,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		int i;
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;
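
The hunks above are mechanical re-wrapping of over-long lines; the pattern they touch is how bnode.c accesses a b-tree node whose backing store is an array of page-cache pages: each memmove() or read of a node descriptor is bracketed by kmap()/kunmap() on the pages involved, and a modified destination page is marked dirty before being unmapped. The following is a minimal illustrative sketch of that bracketing, not the kernel's actual helper; the function name copy_within_page() and its parameters are made up for illustration, and it assumes the copy stays inside a single page.

#include <linux/highmem.h>	/* kmap(), kunmap() */
#include <linux/mm.h>		/* set_page_dirty() */
#include <linux/string.h>	/* memmove() */

/*
 * Hypothetical helper: copy 'len' bytes between two pages of a node's
 * page array, using the same kmap/memmove/set_page_dirty/kunmap
 * sequence seen in the hunks above.
 */
static void copy_within_page(struct page *dst_page, unsigned int dst_off,
			     struct page *src_page, unsigned int src_off,
			     unsigned int len)
{
	void *src_ptr = kmap(src_page);	/* map source page into kernel VA */
	void *dst_ptr = kmap(dst_page);	/* map destination page */

	memmove(dst_ptr + dst_off, src_ptr + src_off, len);

	kunmap(src_page);
	set_page_dirty(dst_page);	/* make sure writeback picks up the change */
	kunmap(dst_page);
}

The real hfs_bnode_move() additionally walks node->page[] and splits the copy at PAGE_CACHE_SIZE boundaries, which is why the first three hunks contain several memmove() calls with per-page lengths.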